diff options
author | Dave Airlie <airlied@redhat.com> | 2012-11-19 18:22:35 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2012-11-19 18:22:35 -0500 |
commit | 9fabd4eedeb904173d05cb1ced3c3e6b9d2e8137 (patch) | |
tree | ff5ebc768e1c83446db6b899016e5560b41d36ca /drivers/gpu | |
parent | 6380813c6e316455b944ba5f7b1515c98b837850 (diff) | |
parent | 6b8294a4d392c2c9f8867e8505511f3fc9419ba7 (diff) |
Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
Highlights of this -next round:
- ivb fdi B/C fixes
- hsw sprite/plane offset fixes from Damien
- unified dp/hdmi encoder for hsw, finally external dp support on hsw
(Paulo)
- kill-agp and some other prep work in the gtt code from Ben
- some fb handling fixes from Ville
- massive pile of patches to align hsw VGA with the spec and make it
actually work (Paulo)
- pile of workarounds from Jesse, mostly for vlv, but also some other
related platforms
- start of a dev_priv reorg, that thing grew out of bounds and chaotic
- small bits&pieces all over the place, down to better error handling for
load-detect on gen2 (Chris, Jani, Mika, Zhenyu, ...)
On top of the previous pile (just copypasta):
- tons of hsw dp prep patches from Paulo
- round scheduled work items and timers to nearest second (Chris)
- some hw workarounds (Jesse&Damien)
- vlv dp support and related fixups (Vijay et al.)
- basic haswell dp support, not yet wired up for external ports (Paulo)
- edp support (Paulo)
- tons of refactorings to prepare for the above (Paulo)
- panel rework, unifying code between lvds and edp panels (Jani)
- panel fitter scaling modes (Jani + Yuly Novikov)
- panel power improvements, should now work without the BIOS setting it up
- extracting some dp helpers from radeon/i915 and move them to
drm_dp_helper.c
- random pile of workarounds (Damien, Ben, ...)
- some cleanups for the register restore code for suspend/resume
- secure batchbuffer support, should enable tear-free blits on gen6+
(Chris)
- random smaller fixlets and cleanups.
* 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel: (231 commits)
drm/i915: Restore physical HWS_PGA after resume
drm/i915: Report amount of usable graphics memory in MiB
drm/i915/i2c: Track users of GMBUS force-bit
drm/i915: Allocate the proper size for contexts.
drm/i915: Update load-detect failure paths for modeset-rework
drm/i915: Clear unused fields of mode for framebuffer creation
drm/i915: Always calculate 8xx WM values based on a 32-bpp framebuffer
drm/i915: Fix sparse warnings in from AGP kill code
drm/i915: Missed lock change with rps lock
drm/i915: Move the remaining gtt code
drm/i915: flush system agent TLBs on SNB
drm/i915: Kill off now unused gen6+ AGP code
drm/i915: Calculate correct stolen size for GEN7+
drm/i915: Stop using AGP layer for GEN6+
drm/i915: drop the double-OP_STOREDW usage in blt_ring_flush
drm/i915: don't rewrite the GTT on resume v4
drm/i915: protect RPS/RC6 related accesses (including PCU) with a new mutex
drm/i915: put ring frequency and turbo setup into a work queue v5
drm/i915: don't block resume on fb console resume v2
drm/i915: extract l3_parity substruct from dev_priv
...
Diffstat (limited to 'drivers/gpu')
35 files changed, 5034 insertions, 2532 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 2ff5cefe9ead..dc4e88f9fb11 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -19,7 +19,7 @@ drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o | |||
19 | 19 | ||
20 | drm-usb-y := drm_usb.o | 20 | drm-usb-y := drm_usb.o |
21 | 21 | ||
22 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o | 22 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o |
23 | drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o | 23 | drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o |
24 | drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o | 24 | drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o |
25 | 25 | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 1227adf74dbc..710516815de7 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -39,6 +39,24 @@ | |||
39 | #include <drm/drm_fb_helper.h> | 39 | #include <drm/drm_fb_helper.h> |
40 | #include <drm/drm_edid.h> | 40 | #include <drm/drm_edid.h> |
41 | 41 | ||
42 | void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) | ||
43 | { | ||
44 | struct drm_connector *connector, *tmp; | ||
45 | struct list_head panel_list; | ||
46 | |||
47 | INIT_LIST_HEAD(&panel_list); | ||
48 | |||
49 | list_for_each_entry_safe(connector, tmp, | ||
50 | &dev->mode_config.connector_list, head) { | ||
51 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || | ||
52 | connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
53 | list_move_tail(&connector->head, &panel_list); | ||
54 | } | ||
55 | |||
56 | list_splice(&panel_list, &dev->mode_config.connector_list); | ||
57 | } | ||
58 | EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); | ||
59 | |||
42 | static bool drm_kms_helper_poll = true; | 60 | static bool drm_kms_helper_poll = true; |
43 | module_param_named(poll, drm_kms_helper_poll, bool, 0600); | 61 | module_param_named(poll, drm_kms_helper_poll, bool, 0600); |
44 | 62 | ||
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 7f246f212457..3c4cccd0d753 100644 --- a/drivers/gpu/drm/drm_dp_i2c_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
@@ -37,7 +37,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, | |||
37 | { | 37 | { |
38 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | 38 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; |
39 | int ret; | 39 | int ret; |
40 | 40 | ||
41 | ret = (*algo_data->aux_ch)(adapter, mode, | 41 | ret = (*algo_data->aux_ch)(adapter, mode, |
42 | write_byte, read_byte); | 42 | write_byte, read_byte); |
43 | return ret; | 43 | return ret; |
@@ -182,7 +182,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) | |||
182 | { | 182 | { |
183 | (void) i2c_algo_dp_aux_address(adapter, 0, false); | 183 | (void) i2c_algo_dp_aux_address(adapter, 0, false); |
184 | (void) i2c_algo_dp_aux_stop(adapter, false); | 184 | (void) i2c_algo_dp_aux_stop(adapter, false); |
185 | |||
186 | } | 185 | } |
187 | 186 | ||
188 | static int | 187 | static int |
@@ -198,7 +197,7 @@ int | |||
198 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter) | 197 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter) |
199 | { | 198 | { |
200 | int error; | 199 | int error; |
201 | 200 | ||
202 | error = i2c_dp_aux_prepare_bus(adapter); | 201 | error = i2c_dp_aux_prepare_bus(adapter); |
203 | if (error) | 202 | if (error) |
204 | return error; | 203 | return error; |
@@ -206,3 +205,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter) | |||
206 | return error; | 205 | return error; |
207 | } | 206 | } |
208 | EXPORT_SYMBOL(i2c_dp_aux_add_bus); | 207 | EXPORT_SYMBOL(i2c_dp_aux_add_bus); |
208 | |||
209 | /* Helpers for DP link training */ | ||
210 | static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) | ||
211 | { | ||
212 | return link_status[r - DP_LANE0_1_STATUS]; | ||
213 | } | ||
214 | |||
215 | static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], | ||
216 | int lane) | ||
217 | { | ||
218 | int i = DP_LANE0_1_STATUS + (lane >> 1); | ||
219 | int s = (lane & 1) * 4; | ||
220 | u8 l = dp_link_status(link_status, i); | ||
221 | return (l >> s) & 0xf; | ||
222 | } | ||
223 | |||
224 | bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], | ||
225 | int lane_count) | ||
226 | { | ||
227 | u8 lane_align; | ||
228 | u8 lane_status; | ||
229 | int lane; | ||
230 | |||
231 | lane_align = dp_link_status(link_status, | ||
232 | DP_LANE_ALIGN_STATUS_UPDATED); | ||
233 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) | ||
234 | return false; | ||
235 | for (lane = 0; lane < lane_count; lane++) { | ||
236 | lane_status = dp_get_lane_status(link_status, lane); | ||
237 | if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) | ||
238 | return false; | ||
239 | } | ||
240 | return true; | ||
241 | } | ||
242 | EXPORT_SYMBOL(drm_dp_channel_eq_ok); | ||
243 | |||
244 | bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], | ||
245 | int lane_count) | ||
246 | { | ||
247 | int lane; | ||
248 | u8 lane_status; | ||
249 | |||
250 | for (lane = 0; lane < lane_count; lane++) { | ||
251 | lane_status = dp_get_lane_status(link_status, lane); | ||
252 | if ((lane_status & DP_LANE_CR_DONE) == 0) | ||
253 | return false; | ||
254 | } | ||
255 | return true; | ||
256 | } | ||
257 | EXPORT_SYMBOL(drm_dp_clock_recovery_ok); | ||
258 | |||
259 | u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], | ||
260 | int lane) | ||
261 | { | ||
262 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
263 | int s = ((lane & 1) ? | ||
264 | DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : | ||
265 | DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); | ||
266 | u8 l = dp_link_status(link_status, i); | ||
267 | |||
268 | return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; | ||
269 | } | ||
270 | EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage); | ||
271 | |||
272 | u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], | ||
273 | int lane) | ||
274 | { | ||
275 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
276 | int s = ((lane & 1) ? | ||
277 | DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : | ||
278 | DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); | ||
279 | u8 l = dp_link_status(link_status, i); | ||
280 | |||
281 | return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; | ||
282 | } | ||
283 | EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); | ||
284 | |||
285 | void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { | ||
286 | if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) | ||
287 | udelay(100); | ||
288 | else | ||
289 | mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); | ||
290 | } | ||
291 | EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); | ||
292 | |||
293 | void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { | ||
294 | if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) | ||
295 | udelay(400); | ||
296 | else | ||
297 | mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); | ||
298 | } | ||
299 | EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay); | ||
300 | |||
301 | u8 drm_dp_link_rate_to_bw_code(int link_rate) | ||
302 | { | ||
303 | switch (link_rate) { | ||
304 | case 162000: | ||
305 | default: | ||
306 | return DP_LINK_BW_1_62; | ||
307 | case 270000: | ||
308 | return DP_LINK_BW_2_7; | ||
309 | case 540000: | ||
310 | return DP_LINK_BW_5_4; | ||
311 | } | ||
312 | } | ||
313 | EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code); | ||
314 | |||
315 | int drm_dp_bw_code_to_link_rate(u8 link_bw) | ||
316 | { | ||
317 | switch (link_bw) { | ||
318 | case DP_LINK_BW_1_62: | ||
319 | default: | ||
320 | return 162000; | ||
321 | case DP_LINK_BW_2_7: | ||
322 | return 270000; | ||
323 | case DP_LINK_BW_5_4: | ||
324 | return 540000; | ||
325 | } | ||
326 | } | ||
327 | EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); | ||
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index dde8b505bf7f..4568e7d8a060 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -1068,7 +1068,7 @@ static int gen6_drpc_info(struct seq_file *m) | |||
1068 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1068 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
1069 | struct drm_device *dev = node->minor->dev; | 1069 | struct drm_device *dev = node->minor->dev; |
1070 | struct drm_i915_private *dev_priv = dev->dev_private; | 1070 | struct drm_i915_private *dev_priv = dev->dev_private; |
1071 | u32 rpmodectl1, gt_core_status, rcctl1; | 1071 | u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; |
1072 | unsigned forcewake_count; | 1072 | unsigned forcewake_count; |
1073 | int count=0, ret; | 1073 | int count=0, ret; |
1074 | 1074 | ||
@@ -1097,6 +1097,9 @@ static int gen6_drpc_info(struct seq_file *m) | |||
1097 | rpmodectl1 = I915_READ(GEN6_RP_CONTROL); | 1097 | rpmodectl1 = I915_READ(GEN6_RP_CONTROL); |
1098 | rcctl1 = I915_READ(GEN6_RC_CONTROL); | 1098 | rcctl1 = I915_READ(GEN6_RC_CONTROL); |
1099 | mutex_unlock(&dev->struct_mutex); | 1099 | mutex_unlock(&dev->struct_mutex); |
1100 | mutex_lock(&dev_priv->rps.hw_lock); | ||
1101 | sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); | ||
1102 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
1100 | 1103 | ||
1101 | seq_printf(m, "Video Turbo Mode: %s\n", | 1104 | seq_printf(m, "Video Turbo Mode: %s\n", |
1102 | yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); | 1105 | yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); |
@@ -1148,6 +1151,12 @@ static int gen6_drpc_info(struct seq_file *m) | |||
1148 | seq_printf(m, "RC6++ residency since boot: %u\n", | 1151 | seq_printf(m, "RC6++ residency since boot: %u\n", |
1149 | I915_READ(GEN6_GT_GFX_RC6pp)); | 1152 | I915_READ(GEN6_GT_GFX_RC6pp)); |
1150 | 1153 | ||
1154 | seq_printf(m, "RC6 voltage: %dmV\n", | ||
1155 | GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); | ||
1156 | seq_printf(m, "RC6+ voltage: %dmV\n", | ||
1157 | GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); | ||
1158 | seq_printf(m, "RC6++ voltage: %dmV\n", | ||
1159 | GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); | ||
1151 | return 0; | 1160 | return 0; |
1152 | } | 1161 | } |
1153 | 1162 | ||
@@ -1273,7 +1282,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
1273 | return 0; | 1282 | return 0; |
1274 | } | 1283 | } |
1275 | 1284 | ||
1276 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1285 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); |
1277 | if (ret) | 1286 | if (ret) |
1278 | return ret; | 1287 | return ret; |
1279 | 1288 | ||
@@ -1282,19 +1291,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
1282 | for (gpu_freq = dev_priv->rps.min_delay; | 1291 | for (gpu_freq = dev_priv->rps.min_delay; |
1283 | gpu_freq <= dev_priv->rps.max_delay; | 1292 | gpu_freq <= dev_priv->rps.max_delay; |
1284 | gpu_freq++) { | 1293 | gpu_freq++) { |
1285 | I915_WRITE(GEN6_PCODE_DATA, gpu_freq); | 1294 | ia_freq = gpu_freq; |
1286 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | | 1295 | sandybridge_pcode_read(dev_priv, |
1287 | GEN6_PCODE_READ_MIN_FREQ_TABLE); | 1296 | GEN6_PCODE_READ_MIN_FREQ_TABLE, |
1288 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & | 1297 | &ia_freq); |
1289 | GEN6_PCODE_READY) == 0, 10)) { | ||
1290 | DRM_ERROR("pcode read of freq table timed out\n"); | ||
1291 | continue; | ||
1292 | } | ||
1293 | ia_freq = I915_READ(GEN6_PCODE_DATA); | ||
1294 | seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); | 1298 | seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); |
1295 | } | 1299 | } |
1296 | 1300 | ||
1297 | mutex_unlock(&dev->struct_mutex); | 1301 | mutex_unlock(&dev_priv->rps.hw_lock); |
1298 | 1302 | ||
1299 | return 0; | 1303 | return 0; |
1300 | } | 1304 | } |
@@ -1398,15 +1402,15 @@ static int i915_context_status(struct seq_file *m, void *unused) | |||
1398 | if (ret) | 1402 | if (ret) |
1399 | return ret; | 1403 | return ret; |
1400 | 1404 | ||
1401 | if (dev_priv->pwrctx) { | 1405 | if (dev_priv->ips.pwrctx) { |
1402 | seq_printf(m, "power context "); | 1406 | seq_printf(m, "power context "); |
1403 | describe_obj(m, dev_priv->pwrctx); | 1407 | describe_obj(m, dev_priv->ips.pwrctx); |
1404 | seq_printf(m, "\n"); | 1408 | seq_printf(m, "\n"); |
1405 | } | 1409 | } |
1406 | 1410 | ||
1407 | if (dev_priv->renderctx) { | 1411 | if (dev_priv->ips.renderctx) { |
1408 | seq_printf(m, "render context "); | 1412 | seq_printf(m, "render context "); |
1409 | describe_obj(m, dev_priv->renderctx); | 1413 | describe_obj(m, dev_priv->ips.renderctx); |
1410 | seq_printf(m, "\n"); | 1414 | seq_printf(m, "\n"); |
1411 | } | 1415 | } |
1412 | 1416 | ||
@@ -1711,13 +1715,13 @@ i915_max_freq_read(struct file *filp, | |||
1711 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | 1715 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) |
1712 | return -ENODEV; | 1716 | return -ENODEV; |
1713 | 1717 | ||
1714 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1718 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); |
1715 | if (ret) | 1719 | if (ret) |
1716 | return ret; | 1720 | return ret; |
1717 | 1721 | ||
1718 | len = snprintf(buf, sizeof(buf), | 1722 | len = snprintf(buf, sizeof(buf), |
1719 | "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); | 1723 | "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); |
1720 | mutex_unlock(&dev->struct_mutex); | 1724 | mutex_unlock(&dev_priv->rps.hw_lock); |
1721 | 1725 | ||
1722 | if (len > sizeof(buf)) | 1726 | if (len > sizeof(buf)) |
1723 | len = sizeof(buf); | 1727 | len = sizeof(buf); |
@@ -1752,7 +1756,7 @@ i915_max_freq_write(struct file *filp, | |||
1752 | 1756 | ||
1753 | DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); | 1757 | DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); |
1754 | 1758 | ||
1755 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1759 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); |
1756 | if (ret) | 1760 | if (ret) |
1757 | return ret; | 1761 | return ret; |
1758 | 1762 | ||
@@ -1762,7 +1766,7 @@ i915_max_freq_write(struct file *filp, | |||
1762 | dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; | 1766 | dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; |
1763 | 1767 | ||
1764 | gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); | 1768 | gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); |
1765 | mutex_unlock(&dev->struct_mutex); | 1769 | mutex_unlock(&dev_priv->rps.hw_lock); |
1766 | 1770 | ||
1767 | return cnt; | 1771 | return cnt; |
1768 | } | 1772 | } |
@@ -1787,13 +1791,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, | |||
1787 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) | 1791 | if (!(IS_GEN6(dev) || IS_GEN7(dev))) |
1788 | return -ENODEV; | 1792 | return -ENODEV; |
1789 | 1793 | ||
1790 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1794 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); |
1791 | if (ret) | 1795 | if (ret) |
1792 | return ret; | 1796 | return ret; |
1793 | 1797 | ||
1794 | len = snprintf(buf, sizeof(buf), | 1798 | len = snprintf(buf, sizeof(buf), |
1795 | "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); | 1799 | "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); |
1796 | mutex_unlock(&dev->struct_mutex); | 1800 | mutex_unlock(&dev_priv->rps.hw_lock); |
1797 | 1801 | ||
1798 | if (len > sizeof(buf)) | 1802 | if (len > sizeof(buf)) |
1799 | len = sizeof(buf); | 1803 | len = sizeof(buf); |
@@ -1826,7 +1830,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
1826 | 1830 | ||
1827 | DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); | 1831 | DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); |
1828 | 1832 | ||
1829 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1833 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); |
1830 | if (ret) | 1834 | if (ret) |
1831 | return ret; | 1835 | return ret; |
1832 | 1836 | ||
@@ -1836,7 +1840,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
1836 | dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; | 1840 | dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; |
1837 | 1841 | ||
1838 | gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); | 1842 | gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); |
1839 | mutex_unlock(&dev->struct_mutex); | 1843 | mutex_unlock(&dev_priv->rps.hw_lock); |
1840 | 1844 | ||
1841 | return cnt; | 1845 | return cnt; |
1842 | } | 1846 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 61ae104dca8c..a48e4910ea2c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -104,32 +104,6 @@ static void i915_write_hws_pga(struct drm_device *dev) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | /** | 106 | /** |
107 | * Sets up the hardware status page for devices that need a physical address | ||
108 | * in the register. | ||
109 | */ | ||
110 | static int i915_init_phys_hws(struct drm_device *dev) | ||
111 | { | ||
112 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
113 | |||
114 | /* Program Hardware Status Page */ | ||
115 | dev_priv->status_page_dmah = | ||
116 | drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); | ||
117 | |||
118 | if (!dev_priv->status_page_dmah) { | ||
119 | DRM_ERROR("Can not allocate hardware status page\n"); | ||
120 | return -ENOMEM; | ||
121 | } | ||
122 | |||
123 | memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr, | ||
124 | 0, PAGE_SIZE); | ||
125 | |||
126 | i915_write_hws_pga(dev); | ||
127 | |||
128 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * Frees the hardware status page, whether it's a physical address or a virtual | 107 | * Frees the hardware status page, whether it's a physical address or a virtual |
134 | * address set up by the X Server. | 108 | * address set up by the X Server. |
135 | */ | 109 | */ |
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev) | |||
451 | drm_i915_private_t *dev_priv = dev->dev_private; | 425 | drm_i915_private_t *dev_priv = dev->dev_private; |
452 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 426 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
453 | 427 | ||
454 | dev_priv->counter++; | 428 | dev_priv->dri1.counter++; |
455 | if (dev_priv->counter > 0x7FFFFFFFUL) | 429 | if (dev_priv->dri1.counter > 0x7FFFFFFFUL) |
456 | dev_priv->counter = 0; | 430 | dev_priv->dri1.counter = 0; |
457 | if (master_priv->sarea_priv) | 431 | if (master_priv->sarea_priv) |
458 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; | 432 | master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; |
459 | 433 | ||
460 | if (BEGIN_LP_RING(4) == 0) { | 434 | if (BEGIN_LP_RING(4) == 0) { |
461 | OUT_RING(MI_STORE_DWORD_INDEX); | 435 | OUT_RING(MI_STORE_DWORD_INDEX); |
462 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 436 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
463 | OUT_RING(dev_priv->counter); | 437 | OUT_RING(dev_priv->dri1.counter); |
464 | OUT_RING(0); | 438 | OUT_RING(0); |
465 | ADVANCE_LP_RING(); | 439 | ADVANCE_LP_RING(); |
466 | } | 440 | } |
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev) | |||
602 | 576 | ||
603 | ADVANCE_LP_RING(); | 577 | ADVANCE_LP_RING(); |
604 | 578 | ||
605 | master_priv->sarea_priv->last_enqueue = dev_priv->counter++; | 579 | master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++; |
606 | 580 | ||
607 | if (BEGIN_LP_RING(4) == 0) { | 581 | if (BEGIN_LP_RING(4) == 0) { |
608 | OUT_RING(MI_STORE_DWORD_INDEX); | 582 | OUT_RING(MI_STORE_DWORD_INDEX); |
609 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 583 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
610 | OUT_RING(dev_priv->counter); | 584 | OUT_RING(dev_priv->dri1.counter); |
611 | OUT_RING(0); | 585 | OUT_RING(0); |
612 | ADVANCE_LP_RING(); | 586 | ADVANCE_LP_RING(); |
613 | } | 587 | } |
@@ -775,21 +749,21 @@ static int i915_emit_irq(struct drm_device * dev) | |||
775 | 749 | ||
776 | DRM_DEBUG_DRIVER("\n"); | 750 | DRM_DEBUG_DRIVER("\n"); |
777 | 751 | ||
778 | dev_priv->counter++; | 752 | dev_priv->dri1.counter++; |
779 | if (dev_priv->counter > 0x7FFFFFFFUL) | 753 | if (dev_priv->dri1.counter > 0x7FFFFFFFUL) |
780 | dev_priv->counter = 1; | 754 | dev_priv->dri1.counter = 1; |
781 | if (master_priv->sarea_priv) | 755 | if (master_priv->sarea_priv) |
782 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; | 756 | master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; |
783 | 757 | ||
784 | if (BEGIN_LP_RING(4) == 0) { | 758 | if (BEGIN_LP_RING(4) == 0) { |
785 | OUT_RING(MI_STORE_DWORD_INDEX); | 759 | OUT_RING(MI_STORE_DWORD_INDEX); |
786 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 760 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
787 | OUT_RING(dev_priv->counter); | 761 | OUT_RING(dev_priv->dri1.counter); |
788 | OUT_RING(MI_USER_INTERRUPT); | 762 | OUT_RING(MI_USER_INTERRUPT); |
789 | ADVANCE_LP_RING(); | 763 | ADVANCE_LP_RING(); |
790 | } | 764 | } |
791 | 765 | ||
792 | return dev_priv->counter; | 766 | return dev_priv->dri1.counter; |
793 | } | 767 | } |
794 | 768 | ||
795 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | 769 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) |
@@ -820,7 +794,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
820 | 794 | ||
821 | if (ret == -EBUSY) { | 795 | if (ret == -EBUSY) { |
822 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | 796 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", |
823 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); | 797 | READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); |
824 | } | 798 | } |
825 | 799 | ||
826 | return ret; | 800 | return ret; |
@@ -1014,6 +988,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
1014 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: | 988 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: |
1015 | value = 1; | 989 | value = 1; |
1016 | break; | 990 | break; |
991 | case I915_PARAM_HAS_SECURE_BATCHES: | ||
992 | value = capable(CAP_SYS_ADMIN); | ||
993 | break; | ||
1017 | default: | 994 | default: |
1018 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 995 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
1019 | param->param); | 996 | param->param); |
@@ -1326,6 +1303,8 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
1326 | 1303 | ||
1327 | intel_modeset_gem_init(dev); | 1304 | intel_modeset_gem_init(dev); |
1328 | 1305 | ||
1306 | INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); | ||
1307 | |||
1329 | ret = drm_irq_install(dev); | 1308 | ret = drm_irq_install(dev); |
1330 | if (ret) | 1309 | if (ret) |
1331 | goto cleanup_gem; | 1310 | goto cleanup_gem; |
@@ -1491,19 +1470,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1491 | goto free_priv; | 1470 | goto free_priv; |
1492 | } | 1471 | } |
1493 | 1472 | ||
1494 | ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); | 1473 | ret = i915_gem_gtt_init(dev); |
1495 | if (!ret) { | 1474 | if (ret) |
1496 | DRM_ERROR("failed to set up gmch\n"); | ||
1497 | ret = -EIO; | ||
1498 | goto put_bridge; | 1475 | goto put_bridge; |
1499 | } | ||
1500 | |||
1501 | dev_priv->mm.gtt = intel_gtt_get(); | ||
1502 | if (!dev_priv->mm.gtt) { | ||
1503 | DRM_ERROR("Failed to initialize GTT\n"); | ||
1504 | ret = -ENODEV; | ||
1505 | goto put_gmch; | ||
1506 | } | ||
1507 | 1476 | ||
1508 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 1477 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1509 | i915_kick_out_firmware_fb(dev_priv); | 1478 | i915_kick_out_firmware_fb(dev_priv); |
@@ -1590,18 +1559,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1590 | intel_setup_gmbus(dev); | 1559 | intel_setup_gmbus(dev); |
1591 | intel_opregion_setup(dev); | 1560 | intel_opregion_setup(dev); |
1592 | 1561 | ||
1593 | /* Make sure the bios did its job and set up vital registers */ | ||
1594 | intel_setup_bios(dev); | 1562 | intel_setup_bios(dev); |
1595 | 1563 | ||
1596 | i915_gem_load(dev); | 1564 | i915_gem_load(dev); |
1597 | 1565 | ||
1598 | /* Init HWS */ | ||
1599 | if (!I915_NEED_GFX_HWS(dev)) { | ||
1600 | ret = i915_init_phys_hws(dev); | ||
1601 | if (ret) | ||
1602 | goto out_gem_unload; | ||
1603 | } | ||
1604 | |||
1605 | /* On the 945G/GM, the chipset reports the MSI capability on the | 1566 | /* On the 945G/GM, the chipset reports the MSI capability on the |
1606 | * integrated graphics even though the support isn't actually there | 1567 | * integrated graphics even though the support isn't actually there |
1607 | * according to the published specs. It doesn't appear to function | 1568 | * according to the published specs. It doesn't appear to function |
@@ -1621,6 +1582,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1621 | spin_lock_init(&dev_priv->rps.lock); | 1582 | spin_lock_init(&dev_priv->rps.lock); |
1622 | spin_lock_init(&dev_priv->dpio_lock); | 1583 | spin_lock_init(&dev_priv->dpio_lock); |
1623 | 1584 | ||
1585 | mutex_init(&dev_priv->rps.hw_lock); | ||
1586 | |||
1624 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 1587 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
1625 | dev_priv->num_pipe = 3; | 1588 | dev_priv->num_pipe = 3; |
1626 | else if (IS_MOBILE(dev) || !IS_GEN2(dev)) | 1589 | else if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
@@ -1678,7 +1641,7 @@ out_mtrrfree: | |||
1678 | out_rmmap: | 1641 | out_rmmap: |
1679 | pci_iounmap(dev->pdev, dev_priv->regs); | 1642 | pci_iounmap(dev->pdev, dev_priv->regs); |
1680 | put_gmch: | 1643 | put_gmch: |
1681 | intel_gmch_remove(); | 1644 | i915_gem_gtt_fini(dev); |
1682 | put_bridge: | 1645 | put_bridge: |
1683 | pci_dev_put(dev_priv->bridge_dev); | 1646 | pci_dev_put(dev_priv->bridge_dev); |
1684 | free_priv: | 1647 | free_priv: |
@@ -1721,6 +1684,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
1721 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1684 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1722 | intel_fbdev_fini(dev); | 1685 | intel_fbdev_fini(dev); |
1723 | intel_modeset_cleanup(dev); | 1686 | intel_modeset_cleanup(dev); |
1687 | cancel_work_sync(&dev_priv->console_resume_work); | ||
1724 | 1688 | ||
1725 | /* | 1689 | /* |
1726 | * free the memory space allocated for the child device | 1690 | * free the memory space allocated for the child device |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6770ee6084b4..f85e8b0ec00f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -423,19 +423,23 @@ void intel_detect_pch(struct drm_device *dev) | |||
423 | dev_priv->pch_type = PCH_IBX; | 423 | dev_priv->pch_type = PCH_IBX; |
424 | dev_priv->num_pch_pll = 2; | 424 | dev_priv->num_pch_pll = 2; |
425 | DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); | 425 | DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); |
426 | WARN_ON(!IS_GEN5(dev)); | ||
426 | } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { | 427 | } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { |
427 | dev_priv->pch_type = PCH_CPT; | 428 | dev_priv->pch_type = PCH_CPT; |
428 | dev_priv->num_pch_pll = 2; | 429 | dev_priv->num_pch_pll = 2; |
429 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); | 430 | DRM_DEBUG_KMS("Found CougarPoint PCH\n"); |
431 | WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); | ||
430 | } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { | 432 | } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { |
431 | /* PantherPoint is CPT compatible */ | 433 | /* PantherPoint is CPT compatible */ |
432 | dev_priv->pch_type = PCH_CPT; | 434 | dev_priv->pch_type = PCH_CPT; |
433 | dev_priv->num_pch_pll = 2; | 435 | dev_priv->num_pch_pll = 2; |
434 | DRM_DEBUG_KMS("Found PatherPoint PCH\n"); | 436 | DRM_DEBUG_KMS("Found PatherPoint PCH\n"); |
437 | WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); | ||
435 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | 438 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
436 | dev_priv->pch_type = PCH_LPT; | 439 | dev_priv->pch_type = PCH_LPT; |
437 | dev_priv->num_pch_pll = 0; | 440 | dev_priv->num_pch_pll = 0; |
438 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); | 441 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); |
442 | WARN_ON(!IS_HASWELL(dev)); | ||
439 | } | 443 | } |
440 | BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); | 444 | BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); |
441 | } | 445 | } |
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
477 | return error; | 481 | return error; |
478 | } | 482 | } |
479 | 483 | ||
484 | cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); | ||
485 | |||
480 | intel_modeset_disable(dev); | 486 | intel_modeset_disable(dev); |
481 | 487 | ||
482 | drm_irq_uninstall(dev); | 488 | drm_irq_uninstall(dev); |
@@ -526,17 +532,23 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) | |||
526 | return 0; | 532 | return 0; |
527 | } | 533 | } |
528 | 534 | ||
529 | static int i915_drm_thaw(struct drm_device *dev) | 535 | void intel_console_resume(struct work_struct *work) |
536 | { | ||
537 | struct drm_i915_private *dev_priv = | ||
538 | container_of(work, struct drm_i915_private, | ||
539 | console_resume_work); | ||
540 | struct drm_device *dev = dev_priv->dev; | ||
541 | |||
542 | console_lock(); | ||
543 | intel_fbdev_set_suspend(dev, 0); | ||
544 | console_unlock(); | ||
545 | } | ||
546 | |||
547 | static int __i915_drm_thaw(struct drm_device *dev) | ||
530 | { | 548 | { |
531 | struct drm_i915_private *dev_priv = dev->dev_private; | 549 | struct drm_i915_private *dev_priv = dev->dev_private; |
532 | int error = 0; | 550 | int error = 0; |
533 | 551 | ||
534 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
535 | mutex_lock(&dev->struct_mutex); | ||
536 | i915_gem_restore_gtt_mappings(dev); | ||
537 | mutex_unlock(&dev->struct_mutex); | ||
538 | } | ||
539 | |||
540 | i915_restore_state(dev); | 552 | i915_restore_state(dev); |
541 | intel_opregion_setup(dev); | 553 | intel_opregion_setup(dev); |
542 | 554 | ||
@@ -553,7 +565,6 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
553 | 565 | ||
554 | intel_modeset_init_hw(dev); | 566 | intel_modeset_init_hw(dev); |
555 | intel_modeset_setup_hw_state(dev); | 567 | intel_modeset_setup_hw_state(dev); |
556 | drm_mode_config_reset(dev); | ||
557 | drm_irq_install(dev); | 568 | drm_irq_install(dev); |
558 | } | 569 | } |
559 | 570 | ||
@@ -561,14 +572,41 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
561 | 572 | ||
562 | dev_priv->modeset_on_lid = 0; | 573 | dev_priv->modeset_on_lid = 0; |
563 | 574 | ||
564 | console_lock(); | 575 | /* |
565 | intel_fbdev_set_suspend(dev, 0); | 576 | * The console lock can be pretty contented on resume due |
566 | console_unlock(); | 577 | * to all the printk activity. Try to keep it out of the hot |
578 | * path of resume if possible. | ||
579 | */ | ||
580 | if (console_trylock()) { | ||
581 | intel_fbdev_set_suspend(dev, 0); | ||
582 | console_unlock(); | ||
583 | } else { | ||
584 | schedule_work(&dev_priv->console_resume_work); | ||
585 | } | ||
586 | |||
587 | return error; | ||
588 | } | ||
589 | |||
590 | static int i915_drm_thaw(struct drm_device *dev) | ||
591 | { | ||
592 | int error = 0; | ||
593 | |||
594 | intel_gt_reset(dev); | ||
595 | |||
596 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
597 | mutex_lock(&dev->struct_mutex); | ||
598 | i915_gem_restore_gtt_mappings(dev); | ||
599 | mutex_unlock(&dev->struct_mutex); | ||
600 | } | ||
601 | |||
602 | __i915_drm_thaw(dev); | ||
603 | |||
567 | return error; | 604 | return error; |
568 | } | 605 | } |
569 | 606 | ||
570 | int i915_resume(struct drm_device *dev) | 607 | int i915_resume(struct drm_device *dev) |
571 | { | 608 | { |
609 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
572 | int ret; | 610 | int ret; |
573 | 611 | ||
574 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 612 | if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
@@ -579,7 +617,20 @@ int i915_resume(struct drm_device *dev) | |||
579 | 617 | ||
580 | pci_set_master(dev->pdev); | 618 | pci_set_master(dev->pdev); |
581 | 619 | ||
582 | ret = i915_drm_thaw(dev); | 620 | intel_gt_reset(dev); |
621 | |||
622 | /* | ||
623 | * Platforms with opregion should have sane BIOS, older ones (gen3 and | ||
624 | * earlier) need this since the BIOS might clear all our scratch PTEs. | ||
625 | */ | ||
626 | if (drm_core_check_feature(dev, DRIVER_MODESET) && | ||
627 | !dev_priv->opregion.header) { | ||
628 | mutex_lock(&dev->struct_mutex); | ||
629 | i915_gem_restore_gtt_mappings(dev); | ||
630 | mutex_unlock(&dev->struct_mutex); | ||
631 | } | ||
632 | |||
633 | ret = __i915_drm_thaw(dev); | ||
583 | if (ret) | 634 | if (ret) |
584 | return ret; | 635 | return ret; |
585 | 636 | ||
@@ -1140,12 +1191,40 @@ static bool IS_DISPLAYREG(u32 reg) | |||
1140 | if (reg == GEN6_GDRST) | 1191 | if (reg == GEN6_GDRST) |
1141 | return false; | 1192 | return false; |
1142 | 1193 | ||
1194 | switch (reg) { | ||
1195 | case _3D_CHICKEN3: | ||
1196 | case IVB_CHICKEN3: | ||
1197 | case GEN7_COMMON_SLICE_CHICKEN1: | ||
1198 | case GEN7_L3CNTLREG1: | ||
1199 | case GEN7_L3_CHICKEN_MODE_REGISTER: | ||
1200 | case GEN7_ROW_CHICKEN2: | ||
1201 | case GEN7_L3SQCREG4: | ||
1202 | case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG: | ||
1203 | case GEN7_HALF_SLICE_CHICKEN1: | ||
1204 | case GEN6_MBCTL: | ||
1205 | case GEN6_UCGCTL2: | ||
1206 | return false; | ||
1207 | default: | ||
1208 | break; | ||
1209 | } | ||
1210 | |||
1143 | return true; | 1211 | return true; |
1144 | } | 1212 | } |
1145 | 1213 | ||
1214 | static void | ||
1215 | ilk_dummy_write(struct drm_i915_private *dev_priv) | ||
1216 | { | ||
1217 | /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the | ||
1218 | * chip from rc6 before touching it for real. MI_MODE is masked, hence | ||
1219 | * harmless to write 0 into. */ | ||
1220 | I915_WRITE_NOTRACE(MI_MODE, 0); | ||
1221 | } | ||
1222 | |||
1146 | #define __i915_read(x, y) \ | 1223 | #define __i915_read(x, y) \ |
1147 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 1224 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
1148 | u##x val = 0; \ | 1225 | u##x val = 0; \ |
1226 | if (IS_GEN5(dev_priv->dev)) \ | ||
1227 | ilk_dummy_write(dev_priv); \ | ||
1149 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 1228 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1150 | unsigned long irqflags; \ | 1229 | unsigned long irqflags; \ |
1151 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | 1230 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ |
@@ -1177,6 +1256,8 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | |||
1177 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 1256 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
1178 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 1257 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
1179 | } \ | 1258 | } \ |
1259 | if (IS_GEN5(dev_priv->dev)) \ | ||
1260 | ilk_dummy_write(dev_priv); \ | ||
1180 | if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ | 1261 | if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ |
1181 | write##y(val, dev_priv->regs + reg + 0x180000); \ | 1262 | write##y(val, dev_priv->regs + reg + 0x180000); \ |
1182 | } else { \ | 1263 | } else { \ |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f511fa2f4168..4b83e5f4b32e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -58,6 +58,14 @@ enum pipe { | |||
58 | }; | 58 | }; |
59 | #define pipe_name(p) ((p) + 'A') | 59 | #define pipe_name(p) ((p) + 'A') |
60 | 60 | ||
61 | enum transcoder { | ||
62 | TRANSCODER_A = 0, | ||
63 | TRANSCODER_B, | ||
64 | TRANSCODER_C, | ||
65 | TRANSCODER_EDP = 0xF, | ||
66 | }; | ||
67 | #define transcoder_name(t) ((t) + 'A') | ||
68 | |||
61 | enum plane { | 69 | enum plane { |
62 | PLANE_A = 0, | 70 | PLANE_A = 0, |
63 | PLANE_B, | 71 | PLANE_B, |
@@ -93,6 +101,12 @@ struct intel_pch_pll { | |||
93 | }; | 101 | }; |
94 | #define I915_NUM_PLLS 2 | 102 | #define I915_NUM_PLLS 2 |
95 | 103 | ||
104 | struct intel_ddi_plls { | ||
105 | int spll_refcount; | ||
106 | int wrpll1_refcount; | ||
107 | int wrpll2_refcount; | ||
108 | }; | ||
109 | |||
96 | /* Interface history: | 110 | /* Interface history: |
97 | * | 111 | * |
98 | * 1.1: Original. | 112 | * 1.1: Original. |
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object { | |||
123 | struct drm_i915_gem_object *cur_obj; | 137 | struct drm_i915_gem_object *cur_obj; |
124 | }; | 138 | }; |
125 | 139 | ||
126 | struct mem_block { | ||
127 | struct mem_block *next; | ||
128 | struct mem_block *prev; | ||
129 | int start; | ||
130 | int size; | ||
131 | struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ | ||
132 | }; | ||
133 | |||
134 | struct opregion_header; | 140 | struct opregion_header; |
135 | struct opregion_acpi; | 141 | struct opregion_acpi; |
136 | struct opregion_swsci; | 142 | struct opregion_swsci; |
@@ -251,6 +257,7 @@ struct drm_i915_display_funcs { | |||
251 | uint32_t sprite_width, int pixel_size); | 257 | uint32_t sprite_width, int pixel_size); |
252 | void (*update_linetime_wm)(struct drm_device *dev, int pipe, | 258 | void (*update_linetime_wm)(struct drm_device *dev, int pipe, |
253 | struct drm_display_mode *mode); | 259 | struct drm_display_mode *mode); |
260 | void (*modeset_global_resources)(struct drm_device *dev); | ||
254 | int (*crtc_mode_set)(struct drm_crtc *crtc, | 261 | int (*crtc_mode_set)(struct drm_crtc *crtc, |
255 | struct drm_display_mode *mode, | 262 | struct drm_display_mode *mode, |
256 | struct drm_display_mode *adjusted_mode, | 263 | struct drm_display_mode *adjusted_mode, |
@@ -263,7 +270,6 @@ struct drm_i915_display_funcs { | |||
263 | struct drm_crtc *crtc); | 270 | struct drm_crtc *crtc); |
264 | void (*fdi_link_train)(struct drm_crtc *crtc); | 271 | void (*fdi_link_train)(struct drm_crtc *crtc); |
265 | void (*init_clock_gating)(struct drm_device *dev); | 272 | void (*init_clock_gating)(struct drm_device *dev); |
266 | void (*init_pch_clock_gating)(struct drm_device *dev); | ||
267 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, | 273 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
268 | struct drm_framebuffer *fb, | 274 | struct drm_framebuffer *fb, |
269 | struct drm_i915_gem_object *obj); | 275 | struct drm_i915_gem_object *obj); |
@@ -338,6 +344,7 @@ struct intel_device_info { | |||
338 | #define I915_PPGTT_PD_ENTRIES 512 | 344 | #define I915_PPGTT_PD_ENTRIES 512 |
339 | #define I915_PPGTT_PT_ENTRIES 1024 | 345 | #define I915_PPGTT_PT_ENTRIES 1024 |
340 | struct i915_hw_ppgtt { | 346 | struct i915_hw_ppgtt { |
347 | struct drm_device *dev; | ||
341 | unsigned num_pd_entries; | 348 | unsigned num_pd_entries; |
342 | struct page **pt_pages; | 349 | struct page **pt_pages; |
343 | uint32_t pd_offset; | 350 | uint32_t pd_offset; |
@@ -383,149 +390,14 @@ struct intel_fbc_work; | |||
383 | 390 | ||
384 | struct intel_gmbus { | 391 | struct intel_gmbus { |
385 | struct i2c_adapter adapter; | 392 | struct i2c_adapter adapter; |
386 | bool force_bit; | 393 | u32 force_bit; |
387 | u32 reg0; | 394 | u32 reg0; |
388 | u32 gpio_reg; | 395 | u32 gpio_reg; |
389 | struct i2c_algo_bit_data bit_algo; | 396 | struct i2c_algo_bit_data bit_algo; |
390 | struct drm_i915_private *dev_priv; | 397 | struct drm_i915_private *dev_priv; |
391 | }; | 398 | }; |
392 | 399 | ||
393 | typedef struct drm_i915_private { | 400 | struct i915_suspend_saved_registers { |
394 | struct drm_device *dev; | ||
395 | |||
396 | const struct intel_device_info *info; | ||
397 | |||
398 | int relative_constants_mode; | ||
399 | |||
400 | void __iomem *regs; | ||
401 | |||
402 | struct drm_i915_gt_funcs gt; | ||
403 | /** gt_fifo_count and the subsequent register write are synchronized | ||
404 | * with dev->struct_mutex. */ | ||
405 | unsigned gt_fifo_count; | ||
406 | /** forcewake_count is protected by gt_lock */ | ||
407 | unsigned forcewake_count; | ||
408 | /** gt_lock is also taken in irq contexts. */ | ||
409 | struct spinlock gt_lock; | ||
410 | |||
411 | struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; | ||
412 | |||
413 | /** gmbus_mutex protects against concurrent usage of the single hw gmbus | ||
414 | * controller on different i2c buses. */ | ||
415 | struct mutex gmbus_mutex; | ||
416 | |||
417 | /** | ||
418 | * Base address of the gmbus and gpio block. | ||
419 | */ | ||
420 | uint32_t gpio_mmio_base; | ||
421 | |||
422 | struct pci_dev *bridge_dev; | ||
423 | struct intel_ring_buffer ring[I915_NUM_RINGS]; | ||
424 | uint32_t next_seqno; | ||
425 | |||
426 | drm_dma_handle_t *status_page_dmah; | ||
427 | uint32_t counter; | ||
428 | struct drm_i915_gem_object *pwrctx; | ||
429 | struct drm_i915_gem_object *renderctx; | ||
430 | |||
431 | struct resource mch_res; | ||
432 | |||
433 | atomic_t irq_received; | ||
434 | |||
435 | /* protects the irq masks */ | ||
436 | spinlock_t irq_lock; | ||
437 | |||
438 | /* DPIO indirect register protection */ | ||
439 | spinlock_t dpio_lock; | ||
440 | |||
441 | /** Cached value of IMR to avoid reads in updating the bitfield */ | ||
442 | u32 pipestat[2]; | ||
443 | u32 irq_mask; | ||
444 | u32 gt_irq_mask; | ||
445 | u32 pch_irq_mask; | ||
446 | |||
447 | u32 hotplug_supported_mask; | ||
448 | struct work_struct hotplug_work; | ||
449 | |||
450 | int num_pipe; | ||
451 | int num_pch_pll; | ||
452 | |||
453 | /* For hangcheck timer */ | ||
454 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ | ||
455 | struct timer_list hangcheck_timer; | ||
456 | int hangcheck_count; | ||
457 | uint32_t last_acthd[I915_NUM_RINGS]; | ||
458 | uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; | ||
459 | |||
460 | unsigned int stop_rings; | ||
461 | |||
462 | unsigned long cfb_size; | ||
463 | unsigned int cfb_fb; | ||
464 | enum plane cfb_plane; | ||
465 | int cfb_y; | ||
466 | struct intel_fbc_work *fbc_work; | ||
467 | |||
468 | struct intel_opregion opregion; | ||
469 | |||
470 | /* overlay */ | ||
471 | struct intel_overlay *overlay; | ||
472 | bool sprite_scaling_enabled; | ||
473 | |||
474 | /* LVDS info */ | ||
475 | int backlight_level; /* restore backlight to this value */ | ||
476 | bool backlight_enabled; | ||
477 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ | ||
478 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ | ||
479 | |||
480 | /* Feature bits from the VBIOS */ | ||
481 | unsigned int int_tv_support:1; | ||
482 | unsigned int lvds_dither:1; | ||
483 | unsigned int lvds_vbt:1; | ||
484 | unsigned int int_crt_support:1; | ||
485 | unsigned int lvds_use_ssc:1; | ||
486 | unsigned int display_clock_mode:1; | ||
487 | int lvds_ssc_freq; | ||
488 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ | ||
489 | unsigned int lvds_val; /* used for checking LVDS channel mode */ | ||
490 | struct { | ||
491 | int rate; | ||
492 | int lanes; | ||
493 | int preemphasis; | ||
494 | int vswing; | ||
495 | |||
496 | bool initialized; | ||
497 | bool support; | ||
498 | int bpp; | ||
499 | struct edp_power_seq pps; | ||
500 | } edp; | ||
501 | bool no_aux_handshake; | ||
502 | |||
503 | struct notifier_block lid_notifier; | ||
504 | |||
505 | int crt_ddc_pin; | ||
506 | struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ | ||
507 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | ||
508 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ | ||
509 | |||
510 | unsigned int fsb_freq, mem_freq, is_ddr3; | ||
511 | |||
512 | spinlock_t error_lock; | ||
513 | /* Protected by dev->error_lock. */ | ||
514 | struct drm_i915_error_state *first_error; | ||
515 | struct work_struct error_work; | ||
516 | struct completion error_completion; | ||
517 | struct workqueue_struct *wq; | ||
518 | |||
519 | /* Display functions */ | ||
520 | struct drm_i915_display_funcs display; | ||
521 | |||
522 | /* PCH chipset type */ | ||
523 | enum intel_pch pch_type; | ||
524 | |||
525 | unsigned long quirks; | ||
526 | |||
527 | /* Register state */ | ||
528 | bool modeset_on_lid; | ||
529 | u8 saveLBB; | 401 | u8 saveLBB; |
530 | u32 saveDSPACNTR; | 402 | u32 saveDSPACNTR; |
531 | u32 saveDSPBCNTR; | 403 | u32 saveDSPBCNTR; |
@@ -676,10 +548,205 @@ typedef struct drm_i915_private { | |||
676 | u32 savePIPEB_LINK_N1; | 548 | u32 savePIPEB_LINK_N1; |
677 | u32 saveMCHBAR_RENDER_STANDBY; | 549 | u32 saveMCHBAR_RENDER_STANDBY; |
678 | u32 savePCH_PORT_HOTPLUG; | 550 | u32 savePCH_PORT_HOTPLUG; |
551 | }; | ||
552 | |||
553 | struct intel_gen6_power_mgmt { | ||
554 | struct work_struct work; | ||
555 | u32 pm_iir; | ||
556 | /* lock - irqsave spinlock that protectects the work_struct and | ||
557 | * pm_iir. */ | ||
558 | spinlock_t lock; | ||
559 | |||
560 | /* The below variables an all the rps hw state are protected by | ||
561 | * dev->struct mutext. */ | ||
562 | u8 cur_delay; | ||
563 | u8 min_delay; | ||
564 | u8 max_delay; | ||
565 | |||
566 | struct delayed_work delayed_resume_work; | ||
567 | |||
568 | /* | ||
569 | * Protects RPS/RC6 register access and PCU communication. | ||
570 | * Must be taken after struct_mutex if nested. | ||
571 | */ | ||
572 | struct mutex hw_lock; | ||
573 | }; | ||
574 | |||
575 | struct intel_ilk_power_mgmt { | ||
576 | u8 cur_delay; | ||
577 | u8 min_delay; | ||
578 | u8 max_delay; | ||
579 | u8 fmax; | ||
580 | u8 fstart; | ||
581 | |||
582 | u64 last_count1; | ||
583 | unsigned long last_time1; | ||
584 | unsigned long chipset_power; | ||
585 | u64 last_count2; | ||
586 | struct timespec last_time2; | ||
587 | unsigned long gfx_power; | ||
588 | u8 corr; | ||
589 | |||
590 | int c_m; | ||
591 | int r_t; | ||
592 | |||
593 | struct drm_i915_gem_object *pwrctx; | ||
594 | struct drm_i915_gem_object *renderctx; | ||
595 | }; | ||
596 | |||
597 | struct i915_dri1_state { | ||
598 | unsigned allow_batchbuffer : 1; | ||
599 | u32 __iomem *gfx_hws_cpu_addr; | ||
600 | |||
601 | unsigned int cpp; | ||
602 | int back_offset; | ||
603 | int front_offset; | ||
604 | int current_page; | ||
605 | int page_flipping; | ||
606 | |||
607 | uint32_t counter; | ||
608 | }; | ||
609 | |||
610 | struct intel_l3_parity { | ||
611 | u32 *remap_info; | ||
612 | struct work_struct error_work; | ||
613 | }; | ||
614 | |||
615 | typedef struct drm_i915_private { | ||
616 | struct drm_device *dev; | ||
617 | |||
618 | const struct intel_device_info *info; | ||
619 | |||
620 | int relative_constants_mode; | ||
621 | |||
622 | void __iomem *regs; | ||
623 | |||
624 | struct drm_i915_gt_funcs gt; | ||
625 | /** gt_fifo_count and the subsequent register write are synchronized | ||
626 | * with dev->struct_mutex. */ | ||
627 | unsigned gt_fifo_count; | ||
628 | /** forcewake_count is protected by gt_lock */ | ||
629 | unsigned forcewake_count; | ||
630 | /** gt_lock is also taken in irq contexts. */ | ||
631 | struct spinlock gt_lock; | ||
632 | |||
633 | struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; | ||
634 | |||
635 | /** gmbus_mutex protects against concurrent usage of the single hw gmbus | ||
636 | * controller on different i2c buses. */ | ||
637 | struct mutex gmbus_mutex; | ||
638 | |||
639 | /** | ||
640 | * Base address of the gmbus and gpio block. | ||
641 | */ | ||
642 | uint32_t gpio_mmio_base; | ||
643 | |||
644 | struct pci_dev *bridge_dev; | ||
645 | struct intel_ring_buffer ring[I915_NUM_RINGS]; | ||
646 | uint32_t next_seqno; | ||
647 | |||
648 | drm_dma_handle_t *status_page_dmah; | ||
649 | struct resource mch_res; | ||
650 | |||
651 | atomic_t irq_received; | ||
652 | |||
653 | /* protects the irq masks */ | ||
654 | spinlock_t irq_lock; | ||
655 | |||
656 | /* DPIO indirect register protection */ | ||
657 | spinlock_t dpio_lock; | ||
658 | |||
659 | /** Cached value of IMR to avoid reads in updating the bitfield */ | ||
660 | u32 pipestat[2]; | ||
661 | u32 irq_mask; | ||
662 | u32 gt_irq_mask; | ||
663 | u32 pch_irq_mask; | ||
664 | |||
665 | u32 hotplug_supported_mask; | ||
666 | struct work_struct hotplug_work; | ||
667 | |||
668 | int num_pipe; | ||
669 | int num_pch_pll; | ||
670 | |||
671 | /* For hangcheck timer */ | ||
672 | #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ | ||
673 | #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) | ||
674 | struct timer_list hangcheck_timer; | ||
675 | int hangcheck_count; | ||
676 | uint32_t last_acthd[I915_NUM_RINGS]; | ||
677 | uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; | ||
678 | |||
679 | unsigned int stop_rings; | ||
680 | |||
681 | unsigned long cfb_size; | ||
682 | unsigned int cfb_fb; | ||
683 | enum plane cfb_plane; | ||
684 | int cfb_y; | ||
685 | struct intel_fbc_work *fbc_work; | ||
686 | |||
687 | struct intel_opregion opregion; | ||
688 | |||
689 | /* overlay */ | ||
690 | struct intel_overlay *overlay; | ||
691 | bool sprite_scaling_enabled; | ||
692 | |||
693 | /* LVDS info */ | ||
694 | int backlight_level; /* restore backlight to this value */ | ||
695 | bool backlight_enabled; | ||
696 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ | ||
697 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ | ||
698 | |||
699 | /* Feature bits from the VBIOS */ | ||
700 | unsigned int int_tv_support:1; | ||
701 | unsigned int lvds_dither:1; | ||
702 | unsigned int lvds_vbt:1; | ||
703 | unsigned int int_crt_support:1; | ||
704 | unsigned int lvds_use_ssc:1; | ||
705 | unsigned int display_clock_mode:1; | ||
706 | int lvds_ssc_freq; | ||
707 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ | ||
708 | unsigned int lvds_val; /* used for checking LVDS channel mode */ | ||
709 | struct { | ||
710 | int rate; | ||
711 | int lanes; | ||
712 | int preemphasis; | ||
713 | int vswing; | ||
714 | |||
715 | bool initialized; | ||
716 | bool support; | ||
717 | int bpp; | ||
718 | struct edp_power_seq pps; | ||
719 | } edp; | ||
720 | bool no_aux_handshake; | ||
721 | |||
722 | int crt_ddc_pin; | ||
723 | struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ | ||
724 | int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ | ||
725 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ | ||
726 | |||
727 | unsigned int fsb_freq, mem_freq, is_ddr3; | ||
728 | |||
729 | spinlock_t error_lock; | ||
730 | /* Protected by dev->error_lock. */ | ||
731 | struct drm_i915_error_state *first_error; | ||
732 | struct work_struct error_work; | ||
733 | struct completion error_completion; | ||
734 | struct workqueue_struct *wq; | ||
735 | |||
736 | /* Display functions */ | ||
737 | struct drm_i915_display_funcs display; | ||
738 | |||
739 | /* PCH chipset type */ | ||
740 | enum intel_pch pch_type; | ||
741 | |||
742 | unsigned long quirks; | ||
743 | |||
744 | /* Register state */ | ||
745 | bool modeset_on_lid; | ||
679 | 746 | ||
680 | struct { | 747 | struct { |
681 | /** Bridge to intel-gtt-ko */ | 748 | /** Bridge to intel-gtt-ko */ |
682 | const struct intel_gtt *gtt; | 749 | struct intel_gtt *gtt; |
683 | /** Memory allocator for GTT stolen memory */ | 750 | /** Memory allocator for GTT stolen memory */ |
684 | struct drm_mm stolen; | 751 | struct drm_mm stolen; |
685 | /** Memory allocator for GTT */ | 752 | /** Memory allocator for GTT */ |
@@ -706,8 +773,6 @@ typedef struct drm_i915_private { | |||
706 | /** PPGTT used for aliasing the PPGTT with the GTT */ | 773 | /** PPGTT used for aliasing the PPGTT with the GTT */ |
707 | struct i915_hw_ppgtt *aliasing_ppgtt; | 774 | struct i915_hw_ppgtt *aliasing_ppgtt; |
708 | 775 | ||
709 | u32 *l3_remap_info; | ||
710 | |||
711 | struct shrinker inactive_shrinker; | 776 | struct shrinker inactive_shrinker; |
712 | 777 | ||
713 | /** | 778 | /** |
@@ -785,19 +850,6 @@ typedef struct drm_i915_private { | |||
785 | u32 object_count; | 850 | u32 object_count; |
786 | } mm; | 851 | } mm; |
787 | 852 | ||
788 | /* Old dri1 support infrastructure, beware the dragons ya fools entering | ||
789 | * here! */ | ||
790 | struct { | ||
791 | unsigned allow_batchbuffer : 1; | ||
792 | u32 __iomem *gfx_hws_cpu_addr; | ||
793 | |||
794 | unsigned int cpp; | ||
795 | int back_offset; | ||
796 | int front_offset; | ||
797 | int current_page; | ||
798 | int page_flipping; | ||
799 | } dri1; | ||
800 | |||
801 | /* Kernel Modesetting */ | 853 | /* Kernel Modesetting */ |
802 | 854 | ||
803 | struct sdvo_device_mapping sdvo_mappings[2]; | 855 | struct sdvo_device_mapping sdvo_mappings[2]; |
@@ -811,6 +863,7 @@ typedef struct drm_i915_private { | |||
811 | wait_queue_head_t pending_flip_queue; | 863 | wait_queue_head_t pending_flip_queue; |
812 | 864 | ||
813 | struct intel_pch_pll pch_plls[I915_NUM_PLLS]; | 865 | struct intel_pch_pll pch_plls[I915_NUM_PLLS]; |
866 | struct intel_ddi_plls ddi_plls; | ||
814 | 867 | ||
815 | /* Reclocking support */ | 868 | /* Reclocking support */ |
816 | bool render_reclock_avail; | 869 | bool render_reclock_avail; |
@@ -820,46 +873,17 @@ typedef struct drm_i915_private { | |||
820 | u16 orig_clock; | 873 | u16 orig_clock; |
821 | int child_dev_num; | 874 | int child_dev_num; |
822 | struct child_device_config *child_dev; | 875 | struct child_device_config *child_dev; |
823 | struct drm_connector *int_lvds_connector; | ||
824 | struct drm_connector *int_edp_connector; | ||
825 | 876 | ||
826 | bool mchbar_need_disable; | 877 | bool mchbar_need_disable; |
827 | 878 | ||
879 | struct intel_l3_parity l3_parity; | ||
880 | |||
828 | /* gen6+ rps state */ | 881 | /* gen6+ rps state */ |
829 | struct { | 882 | struct intel_gen6_power_mgmt rps; |
830 | struct work_struct work; | ||
831 | u32 pm_iir; | ||
832 | /* lock - irqsave spinlock that protectects the work_struct and | ||
833 | * pm_iir. */ | ||
834 | spinlock_t lock; | ||
835 | |||
836 | /* The below variables an all the rps hw state are protected by | ||
837 | * dev->struct mutext. */ | ||
838 | u8 cur_delay; | ||
839 | u8 min_delay; | ||
840 | u8 max_delay; | ||
841 | } rps; | ||
842 | 883 | ||
843 | /* ilk-only ips/rps state. Everything in here is protected by the global | 884 | /* ilk-only ips/rps state. Everything in here is protected by the global |
844 | * mchdev_lock in intel_pm.c */ | 885 | * mchdev_lock in intel_pm.c */ |
845 | struct { | 886 | struct intel_ilk_power_mgmt ips; |
846 | u8 cur_delay; | ||
847 | u8 min_delay; | ||
848 | u8 max_delay; | ||
849 | u8 fmax; | ||
850 | u8 fstart; | ||
851 | |||
852 | u64 last_count1; | ||
853 | unsigned long last_time1; | ||
854 | unsigned long chipset_power; | ||
855 | u64 last_count2; | ||
856 | struct timespec last_time2; | ||
857 | unsigned long gfx_power; | ||
858 | u8 corr; | ||
859 | |||
860 | int c_m; | ||
861 | int r_t; | ||
862 | } ips; | ||
863 | 887 | ||
864 | enum no_fbc_reason no_fbc_reason; | 888 | enum no_fbc_reason no_fbc_reason; |
865 | 889 | ||
@@ -871,14 +895,25 @@ typedef struct drm_i915_private { | |||
871 | /* list of fbdev register on this device */ | 895 | /* list of fbdev register on this device */ |
872 | struct intel_fbdev *fbdev; | 896 | struct intel_fbdev *fbdev; |
873 | 897 | ||
898 | /* | ||
899 | * The console may be contended at resume, but we don't | ||
900 | * want it to block on it. | ||
901 | */ | ||
902 | struct work_struct console_resume_work; | ||
903 | |||
874 | struct backlight_device *backlight; | 904 | struct backlight_device *backlight; |
875 | 905 | ||
876 | struct drm_property *broadcast_rgb_property; | 906 | struct drm_property *broadcast_rgb_property; |
877 | struct drm_property *force_audio_property; | 907 | struct drm_property *force_audio_property; |
878 | 908 | ||
879 | struct work_struct parity_error_work; | ||
880 | bool hw_contexts_disabled; | 909 | bool hw_contexts_disabled; |
881 | uint32_t hw_context_size; | 910 | uint32_t hw_context_size; |
911 | |||
912 | struct i915_suspend_saved_registers regfile; | ||
913 | |||
914 | /* Old dri1 support infrastructure, beware the dragons ya fools entering | ||
915 | * here! */ | ||
916 | struct i915_dri1_state dri1; | ||
882 | } drm_i915_private_t; | 917 | } drm_i915_private_t; |
883 | 918 | ||
884 | /* Iterate over initialised rings */ | 919 | /* Iterate over initialised rings */ |
@@ -1120,6 +1155,9 @@ struct drm_i915_file_private { | |||
1120 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) | 1155 | #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) |
1121 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) | 1156 | #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
1122 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) | 1157 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
1158 | #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ | ||
1159 | (dev)->pci_device == 0x0152 || \ | ||
1160 | (dev)->pci_device == 0x015a) | ||
1123 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) | 1161 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
1124 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) | 1162 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
1125 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) | 1163 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
@@ -1250,6 +1288,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); | |||
1250 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); | 1288 | extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
1251 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); | 1289 | extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
1252 | 1290 | ||
1291 | extern void intel_console_resume(struct work_struct *work); | ||
1253 | 1292 | ||
1254 | /* i915_irq.c */ | 1293 | /* i915_irq.c */ |
1255 | void i915_hangcheck_elapsed(unsigned long data); | 1294 | void i915_hangcheck_elapsed(unsigned long data); |
@@ -1257,6 +1296,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged); | |||
1257 | 1296 | ||
1258 | extern void intel_irq_init(struct drm_device *dev); | 1297 | extern void intel_irq_init(struct drm_device *dev); |
1259 | extern void intel_gt_init(struct drm_device *dev); | 1298 | extern void intel_gt_init(struct drm_device *dev); |
1299 | extern void intel_gt_reset(struct drm_device *dev); | ||
1260 | 1300 | ||
1261 | void i915_error_state_free(struct kref *error_ref); | 1301 | void i915_error_state_free(struct kref *error_ref); |
1262 | 1302 | ||
@@ -1499,6 +1539,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev, | |||
1499 | unsigned long start, | 1539 | unsigned long start, |
1500 | unsigned long mappable_end, | 1540 | unsigned long mappable_end, |
1501 | unsigned long end); | 1541 | unsigned long end); |
1542 | int i915_gem_gtt_init(struct drm_device *dev); | ||
1543 | void i915_gem_gtt_fini(struct drm_device *dev); | ||
1544 | extern inline void i915_gem_chipset_flush(struct drm_device *dev) | ||
1545 | { | ||
1546 | if (INTEL_INFO(dev)->gen < 6) | ||
1547 | intel_gtt_chipset_flush(); | ||
1548 | } | ||
1549 | |||
1502 | 1550 | ||
1503 | /* i915_gem_evict.c */ | 1551 | /* i915_gem_evict.c */ |
1504 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, | 1552 | int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, |
@@ -1628,6 +1676,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); | |||
1628 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); | 1676 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1629 | int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); | 1677 | int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1630 | 1678 | ||
1679 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); | ||
1680 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); | ||
1681 | |||
1631 | #define __i915_read(x, y) \ | 1682 | #define __i915_read(x, y) \ |
1632 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); | 1683 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); |
1633 | 1684 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 107f09befe92..a2f1b8652d68 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -845,12 +845,12 @@ out: | |||
845 | * domain anymore. */ | 845 | * domain anymore. */ |
846 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | 846 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
847 | i915_gem_clflush_object(obj); | 847 | i915_gem_clflush_object(obj); |
848 | intel_gtt_chipset_flush(); | 848 | i915_gem_chipset_flush(dev); |
849 | } | 849 | } |
850 | } | 850 | } |
851 | 851 | ||
852 | if (needs_clflush_after) | 852 | if (needs_clflush_after) |
853 | intel_gtt_chipset_flush(); | 853 | i915_gem_chipset_flush(dev); |
854 | 854 | ||
855 | return ret; | 855 | return ret; |
856 | } | 856 | } |
@@ -2022,12 +2022,12 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
2022 | if (!dev_priv->mm.suspended) { | 2022 | if (!dev_priv->mm.suspended) { |
2023 | if (i915_enable_hangcheck) { | 2023 | if (i915_enable_hangcheck) { |
2024 | mod_timer(&dev_priv->hangcheck_timer, | 2024 | mod_timer(&dev_priv->hangcheck_timer, |
2025 | jiffies + | 2025 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
2026 | msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
2027 | } | 2026 | } |
2028 | if (was_empty) { | 2027 | if (was_empty) { |
2029 | queue_delayed_work(dev_priv->wq, | 2028 | queue_delayed_work(dev_priv->wq, |
2030 | &dev_priv->mm.retire_work, HZ); | 2029 | &dev_priv->mm.retire_work, |
2030 | round_jiffies_up_relative(HZ)); | ||
2031 | intel_mark_busy(dev_priv->dev); | 2031 | intel_mark_busy(dev_priv->dev); |
2032 | } | 2032 | } |
2033 | } | 2033 | } |
@@ -2218,7 +2218,8 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
2218 | 2218 | ||
2219 | /* Come back later if the device is busy... */ | 2219 | /* Come back later if the device is busy... */ |
2220 | if (!mutex_trylock(&dev->struct_mutex)) { | 2220 | if (!mutex_trylock(&dev->struct_mutex)) { |
2221 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 2221 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, |
2222 | round_jiffies_up_relative(HZ)); | ||
2222 | return; | 2223 | return; |
2223 | } | 2224 | } |
2224 | 2225 | ||
@@ -2236,7 +2237,8 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
2236 | } | 2237 | } |
2237 | 2238 | ||
2238 | if (!dev_priv->mm.suspended && !idle) | 2239 | if (!dev_priv->mm.suspended && !idle) |
2239 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 2240 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, |
2241 | round_jiffies_up_relative(HZ)); | ||
2240 | if (idle) | 2242 | if (idle) |
2241 | intel_mark_idle(dev); | 2243 | intel_mark_idle(dev); |
2242 | 2244 | ||
@@ -3059,7 +3061,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) | |||
3059 | return; | 3061 | return; |
3060 | 3062 | ||
3061 | i915_gem_clflush_object(obj); | 3063 | i915_gem_clflush_object(obj); |
3062 | intel_gtt_chipset_flush(); | 3064 | i915_gem_chipset_flush(obj->base.dev); |
3063 | old_write_domain = obj->base.write_domain; | 3065 | old_write_domain = obj->base.write_domain; |
3064 | obj->base.write_domain = 0; | 3066 | obj->base.write_domain = 0; |
3065 | 3067 | ||
@@ -3832,7 +3834,7 @@ void i915_gem_l3_remap(struct drm_device *dev) | |||
3832 | if (!IS_IVYBRIDGE(dev)) | 3834 | if (!IS_IVYBRIDGE(dev)) |
3833 | return; | 3835 | return; |
3834 | 3836 | ||
3835 | if (!dev_priv->mm.l3_remap_info) | 3837 | if (!dev_priv->l3_parity.remap_info) |
3836 | return; | 3838 | return; |
3837 | 3839 | ||
3838 | misccpctl = I915_READ(GEN7_MISCCPCTL); | 3840 | misccpctl = I915_READ(GEN7_MISCCPCTL); |
@@ -3841,12 +3843,12 @@ void i915_gem_l3_remap(struct drm_device *dev) | |||
3841 | 3843 | ||
3842 | for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { | 3844 | for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { |
3843 | u32 remap = I915_READ(GEN7_L3LOG_BASE + i); | 3845 | u32 remap = I915_READ(GEN7_L3LOG_BASE + i); |
3844 | if (remap && remap != dev_priv->mm.l3_remap_info[i/4]) | 3846 | if (remap && remap != dev_priv->l3_parity.remap_info[i/4]) |
3845 | DRM_DEBUG("0x%x was already programmed to %x\n", | 3847 | DRM_DEBUG("0x%x was already programmed to %x\n", |
3846 | GEN7_L3LOG_BASE + i, remap); | 3848 | GEN7_L3LOG_BASE + i, remap); |
3847 | if (remap && !dev_priv->mm.l3_remap_info[i/4]) | 3849 | if (remap && !dev_priv->l3_parity.remap_info[i/4]) |
3848 | DRM_DEBUG_DRIVER("Clearing remapped register\n"); | 3850 | DRM_DEBUG_DRIVER("Clearing remapped register\n"); |
3849 | I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]); | 3851 | I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]); |
3850 | } | 3852 | } |
3851 | 3853 | ||
3852 | /* Make sure all the writes land before disabling dop clock gating */ | 3854 | /* Make sure all the writes land before disabling dop clock gating */ |
@@ -3876,68 +3878,6 @@ void i915_gem_init_swizzling(struct drm_device *dev) | |||
3876 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); | 3878 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); |
3877 | } | 3879 | } |
3878 | 3880 | ||
3879 | void i915_gem_init_ppgtt(struct drm_device *dev) | ||
3880 | { | ||
3881 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3882 | uint32_t pd_offset; | ||
3883 | struct intel_ring_buffer *ring; | ||
3884 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | ||
3885 | uint32_t __iomem *pd_addr; | ||
3886 | uint32_t pd_entry; | ||
3887 | int i; | ||
3888 | |||
3889 | if (!dev_priv->mm.aliasing_ppgtt) | ||
3890 | return; | ||
3891 | |||
3892 | |||
3893 | pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); | ||
3894 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | ||
3895 | dma_addr_t pt_addr; | ||
3896 | |||
3897 | if (dev_priv->mm.gtt->needs_dmar) | ||
3898 | pt_addr = ppgtt->pt_dma_addr[i]; | ||
3899 | else | ||
3900 | pt_addr = page_to_phys(ppgtt->pt_pages[i]); | ||
3901 | |||
3902 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); | ||
3903 | pd_entry |= GEN6_PDE_VALID; | ||
3904 | |||
3905 | writel(pd_entry, pd_addr + i); | ||
3906 | } | ||
3907 | readl(pd_addr); | ||
3908 | |||
3909 | pd_offset = ppgtt->pd_offset; | ||
3910 | pd_offset /= 64; /* in cachelines, */ | ||
3911 | pd_offset <<= 16; | ||
3912 | |||
3913 | if (INTEL_INFO(dev)->gen == 6) { | ||
3914 | uint32_t ecochk, gab_ctl, ecobits; | ||
3915 | |||
3916 | ecobits = I915_READ(GAC_ECO_BITS); | ||
3917 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); | ||
3918 | |||
3919 | gab_ctl = I915_READ(GAB_CTL); | ||
3920 | I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); | ||
3921 | |||
3922 | ecochk = I915_READ(GAM_ECOCHK); | ||
3923 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | | ||
3924 | ECOCHK_PPGTT_CACHE64B); | ||
3925 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
3926 | } else if (INTEL_INFO(dev)->gen >= 7) { | ||
3927 | I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); | ||
3928 | /* GFX_MODE is per-ring on gen7+ */ | ||
3929 | } | ||
3930 | |||
3931 | for_each_ring(ring, dev_priv, i) { | ||
3932 | if (INTEL_INFO(dev)->gen >= 7) | ||
3933 | I915_WRITE(RING_MODE_GEN7(ring), | ||
3934 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
3935 | |||
3936 | I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); | ||
3937 | I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); | ||
3938 | } | ||
3939 | } | ||
3940 | |||
3941 | static bool | 3881 | static bool |
3942 | intel_enable_blt(struct drm_device *dev) | 3882 | intel_enable_blt(struct drm_device *dev) |
3943 | { | 3883 | { |
@@ -3960,7 +3900,7 @@ i915_gem_init_hw(struct drm_device *dev) | |||
3960 | drm_i915_private_t *dev_priv = dev->dev_private; | 3900 | drm_i915_private_t *dev_priv = dev->dev_private; |
3961 | int ret; | 3901 | int ret; |
3962 | 3902 | ||
3963 | if (!intel_enable_gtt()) | 3903 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
3964 | return -EIO; | 3904 | return -EIO; |
3965 | 3905 | ||
3966 | if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) | 3906 | if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) |
@@ -4295,7 +4235,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
4295 | page_cache_release(page); | 4235 | page_cache_release(page); |
4296 | } | 4236 | } |
4297 | } | 4237 | } |
4298 | intel_gtt_chipset_flush(); | 4238 | i915_gem_chipset_flush(dev); |
4299 | 4239 | ||
4300 | obj->phys_obj->cur_obj = NULL; | 4240 | obj->phys_obj->cur_obj = NULL; |
4301 | obj->phys_obj = NULL; | 4241 | obj->phys_obj = NULL; |
@@ -4382,7 +4322,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, | |||
4382 | return -EFAULT; | 4322 | return -EFAULT; |
4383 | } | 4323 | } |
4384 | 4324 | ||
4385 | intel_gtt_chipset_flush(); | 4325 | i915_gem_chipset_flush(dev); |
4386 | return 0; | 4326 | return 0; |
4387 | } | 4327 | } |
4388 | 4328 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 05ed42f203d7..0e510df80d73 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev, | |||
146 | struct i915_hw_context *ctx; | 146 | struct i915_hw_context *ctx; |
147 | int ret, id; | 147 | int ret, id; |
148 | 148 | ||
149 | ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); | 149 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
150 | if (ctx == NULL) | 150 | if (ctx == NULL) |
151 | return ERR_PTR(-ENOMEM); | 151 | return ERR_PTR(-ENOMEM); |
152 | 152 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3eea143749f6..d80e9dd00c48 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | |||
672 | } | 672 | } |
673 | 673 | ||
674 | if (flush_domains & I915_GEM_DOMAIN_CPU) | 674 | if (flush_domains & I915_GEM_DOMAIN_CPU) |
675 | intel_gtt_chipset_flush(); | 675 | i915_gem_chipset_flush(ring->dev); |
676 | 676 | ||
677 | if (flush_domains & I915_GEM_DOMAIN_GTT) | 677 | if (flush_domains & I915_GEM_DOMAIN_GTT) |
678 | wmb(); | 678 | wmb(); |
@@ -800,6 +800,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
800 | u32 exec_start, exec_len; | 800 | u32 exec_start, exec_len; |
801 | u32 seqno; | 801 | u32 seqno; |
802 | u32 mask; | 802 | u32 mask; |
803 | u32 flags; | ||
803 | int ret, mode, i; | 804 | int ret, mode, i; |
804 | 805 | ||
805 | if (!i915_gem_check_execbuffer(args)) { | 806 | if (!i915_gem_check_execbuffer(args)) { |
@@ -811,6 +812,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
811 | if (ret) | 812 | if (ret) |
812 | return ret; | 813 | return ret; |
813 | 814 | ||
815 | flags = 0; | ||
816 | if (args->flags & I915_EXEC_SECURE) { | ||
817 | if (!file->is_master || !capable(CAP_SYS_ADMIN)) | ||
818 | return -EPERM; | ||
819 | |||
820 | flags |= I915_DISPATCH_SECURE; | ||
821 | } | ||
822 | |||
814 | switch (args->flags & I915_EXEC_RING_MASK) { | 823 | switch (args->flags & I915_EXEC_RING_MASK) { |
815 | case I915_EXEC_DEFAULT: | 824 | case I915_EXEC_DEFAULT: |
816 | case I915_EXEC_RENDER: | 825 | case I915_EXEC_RENDER: |
@@ -983,6 +992,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
983 | } | 992 | } |
984 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | 993 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; |
985 | 994 | ||
995 | /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure | ||
996 | * batch" bit. Hence we need to pin secure batches into the global gtt. | ||
997 | * hsw should have this fixed, but let's be paranoid and do it | ||
998 | * unconditionally for now. */ | ||
999 | if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) | ||
1000 | i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); | ||
1001 | |||
986 | ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); | 1002 | ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); |
987 | if (ret) | 1003 | if (ret) |
988 | goto err; | 1004 | goto err; |
@@ -1028,7 +1044,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1028 | goto err; | 1044 | goto err; |
1029 | } | 1045 | } |
1030 | 1046 | ||
1031 | trace_i915_gem_ring_dispatch(ring, seqno); | 1047 | trace_i915_gem_ring_dispatch(ring, seqno, flags); |
1032 | 1048 | ||
1033 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; | 1049 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; |
1034 | exec_len = args->batch_len; | 1050 | exec_len = args->batch_len; |
@@ -1040,12 +1056,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1040 | goto err; | 1056 | goto err; |
1041 | 1057 | ||
1042 | ret = ring->dispatch_execbuffer(ring, | 1058 | ret = ring->dispatch_execbuffer(ring, |
1043 | exec_start, exec_len); | 1059 | exec_start, exec_len, |
1060 | flags); | ||
1044 | if (ret) | 1061 | if (ret) |
1045 | goto err; | 1062 | goto err; |
1046 | } | 1063 | } |
1047 | } else { | 1064 | } else { |
1048 | ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); | 1065 | ret = ring->dispatch_execbuffer(ring, |
1066 | exec_start, exec_len, | ||
1067 | flags); | ||
1049 | if (ret) | 1068 | if (ret) |
1050 | goto err; | 1069 | goto err; |
1051 | } | 1070 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index df470b5e8d36..35fec1e61346 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -28,19 +28,67 @@ | |||
28 | #include "i915_trace.h" | 28 | #include "i915_trace.h" |
29 | #include "intel_drv.h" | 29 | #include "intel_drv.h" |
30 | 30 | ||
31 | typedef uint32_t gtt_pte_t; | ||
32 | |||
33 | /* PPGTT stuff */ | ||
34 | #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) | ||
35 | |||
36 | #define GEN6_PDE_VALID (1 << 0) | ||
37 | /* gen6+ has bit 11-4 for physical addr bit 39-32 */ | ||
38 | #define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | ||
39 | |||
40 | #define GEN6_PTE_VALID (1 << 0) | ||
41 | #define GEN6_PTE_UNCACHED (1 << 1) | ||
42 | #define HSW_PTE_UNCACHED (0) | ||
43 | #define GEN6_PTE_CACHE_LLC (2 << 1) | ||
44 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) | ||
45 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | ||
46 | |||
47 | static inline gtt_pte_t pte_encode(struct drm_device *dev, | ||
48 | dma_addr_t addr, | ||
49 | enum i915_cache_level level) | ||
50 | { | ||
51 | gtt_pte_t pte = GEN6_PTE_VALID; | ||
52 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | ||
53 | |||
54 | switch (level) { | ||
55 | case I915_CACHE_LLC_MLC: | ||
56 | /* Haswell doesn't set L3 this way */ | ||
57 | if (IS_HASWELL(dev)) | ||
58 | pte |= GEN6_PTE_CACHE_LLC; | ||
59 | else | ||
60 | pte |= GEN6_PTE_CACHE_LLC_MLC; | ||
61 | break; | ||
62 | case I915_CACHE_LLC: | ||
63 | pte |= GEN6_PTE_CACHE_LLC; | ||
64 | break; | ||
65 | case I915_CACHE_NONE: | ||
66 | if (IS_HASWELL(dev)) | ||
67 | pte |= HSW_PTE_UNCACHED; | ||
68 | else | ||
69 | pte |= GEN6_PTE_UNCACHED; | ||
70 | break; | ||
71 | default: | ||
72 | BUG(); | ||
73 | } | ||
74 | |||
75 | |||
76 | return pte; | ||
77 | } | ||
78 | |||
31 | /* PPGTT support for Sandybdrige/Gen6 and later */ | 79 | /* PPGTT support for Sandybdrige/Gen6 and later */ |
32 | static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | 80 | static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, |
33 | unsigned first_entry, | 81 | unsigned first_entry, |
34 | unsigned num_entries) | 82 | unsigned num_entries) |
35 | { | 83 | { |
36 | uint32_t *pt_vaddr; | 84 | gtt_pte_t *pt_vaddr; |
37 | uint32_t scratch_pte; | 85 | gtt_pte_t scratch_pte; |
38 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | 86 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; |
39 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 87 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
40 | unsigned last_pte, i; | 88 | unsigned last_pte, i; |
41 | 89 | ||
42 | scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); | 90 | scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr, |
43 | scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; | 91 | I915_CACHE_LLC); |
44 | 92 | ||
45 | while (num_entries) { | 93 | while (num_entries) { |
46 | last_pte = first_pte + num_entries; | 94 | last_pte = first_pte + num_entries; |
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
77 | if (!ppgtt) | 125 | if (!ppgtt) |
78 | return ret; | 126 | return ret; |
79 | 127 | ||
128 | ppgtt->dev = dev; | ||
80 | ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; | 129 | ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; |
81 | ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, | 130 | ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, |
82 | GFP_KERNEL); | 131 | GFP_KERNEL); |
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
118 | i915_ppgtt_clear_range(ppgtt, 0, | 167 | i915_ppgtt_clear_range(ppgtt, 0, |
119 | ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); | 168 | ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); |
120 | 169 | ||
121 | ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t); | 170 | ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); |
122 | 171 | ||
123 | dev_priv->mm.aliasing_ppgtt = ppgtt; | 172 | dev_priv->mm.aliasing_ppgtt = ppgtt; |
124 | 173 | ||
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) | |||
168 | static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, | 217 | static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, |
169 | const struct sg_table *pages, | 218 | const struct sg_table *pages, |
170 | unsigned first_entry, | 219 | unsigned first_entry, |
171 | uint32_t pte_flags) | 220 | enum i915_cache_level cache_level) |
172 | { | 221 | { |
173 | uint32_t *pt_vaddr, pte; | 222 | gtt_pte_t *pt_vaddr; |
174 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | 223 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; |
175 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 224 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
176 | unsigned i, j, m, segment_len; | 225 | unsigned i, j, m, segment_len; |
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, | |||
188 | 237 | ||
189 | for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { | 238 | for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { |
190 | page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); | 239 | page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
191 | pte = GEN6_PTE_ADDR_ENCODE(page_addr); | 240 | pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr, |
192 | pt_vaddr[j] = pte | pte_flags; | 241 | cache_level); |
193 | 242 | ||
194 | /* grab the next page */ | 243 | /* grab the next page */ |
195 | if (++m == segment_len) { | 244 | if (++m == segment_len) { |
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | |||
213 | struct drm_i915_gem_object *obj, | 262 | struct drm_i915_gem_object *obj, |
214 | enum i915_cache_level cache_level) | 263 | enum i915_cache_level cache_level) |
215 | { | 264 | { |
216 | uint32_t pte_flags = GEN6_PTE_VALID; | ||
217 | |||
218 | switch (cache_level) { | ||
219 | case I915_CACHE_LLC_MLC: | ||
220 | pte_flags |= GEN6_PTE_CACHE_LLC_MLC; | ||
221 | break; | ||
222 | case I915_CACHE_LLC: | ||
223 | pte_flags |= GEN6_PTE_CACHE_LLC; | ||
224 | break; | ||
225 | case I915_CACHE_NONE: | ||
226 | if (IS_HASWELL(obj->base.dev)) | ||
227 | pte_flags |= HSW_PTE_UNCACHED; | ||
228 | else | ||
229 | pte_flags |= GEN6_PTE_UNCACHED; | ||
230 | break; | ||
231 | default: | ||
232 | BUG(); | ||
233 | } | ||
234 | |||
235 | i915_ppgtt_insert_sg_entries(ppgtt, | 265 | i915_ppgtt_insert_sg_entries(ppgtt, |
236 | obj->pages, | 266 | obj->pages, |
237 | obj->gtt_space->start >> PAGE_SHIFT, | 267 | obj->gtt_space->start >> PAGE_SHIFT, |
238 | pte_flags); | 268 | cache_level); |
239 | } | 269 | } |
240 | 270 | ||
241 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, | 271 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, | |||
246 | obj->base.size >> PAGE_SHIFT); | 276 | obj->base.size >> PAGE_SHIFT); |
247 | } | 277 | } |
248 | 278 | ||
249 | /* XXX kill agp_type! */ | 279 | void i915_gem_init_ppgtt(struct drm_device *dev) |
250 | static unsigned int cache_level_to_agp_type(struct drm_device *dev, | ||
251 | enum i915_cache_level cache_level) | ||
252 | { | 280 | { |
253 | switch (cache_level) { | 281 | drm_i915_private_t *dev_priv = dev->dev_private; |
254 | case I915_CACHE_LLC_MLC: | 282 | uint32_t pd_offset; |
255 | if (INTEL_INFO(dev)->gen >= 6) | 283 | struct intel_ring_buffer *ring; |
256 | return AGP_USER_CACHED_MEMORY_LLC_MLC; | 284 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
257 | /* Older chipsets do not have this extra level of CPU | 285 | uint32_t __iomem *pd_addr; |
258 | * cacheing, so fallthrough and request the PTE simply | 286 | uint32_t pd_entry; |
259 | * as cached. | 287 | int i; |
260 | */ | 288 | |
261 | case I915_CACHE_LLC: | 289 | if (!dev_priv->mm.aliasing_ppgtt) |
262 | return AGP_USER_CACHED_MEMORY; | 290 | return; |
263 | default: | 291 | |
264 | case I915_CACHE_NONE: | 292 | |
265 | return AGP_USER_MEMORY; | 293 | pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); |
294 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | ||
295 | dma_addr_t pt_addr; | ||
296 | |||
297 | if (dev_priv->mm.gtt->needs_dmar) | ||
298 | pt_addr = ppgtt->pt_dma_addr[i]; | ||
299 | else | ||
300 | pt_addr = page_to_phys(ppgtt->pt_pages[i]); | ||
301 | |||
302 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); | ||
303 | pd_entry |= GEN6_PDE_VALID; | ||
304 | |||
305 | writel(pd_entry, pd_addr + i); | ||
306 | } | ||
307 | readl(pd_addr); | ||
308 | |||
309 | pd_offset = ppgtt->pd_offset; | ||
310 | pd_offset /= 64; /* in cachelines, */ | ||
311 | pd_offset <<= 16; | ||
312 | |||
313 | if (INTEL_INFO(dev)->gen == 6) { | ||
314 | uint32_t ecochk, gab_ctl, ecobits; | ||
315 | |||
316 | ecobits = I915_READ(GAC_ECO_BITS); | ||
317 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); | ||
318 | |||
319 | gab_ctl = I915_READ(GAB_CTL); | ||
320 | I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); | ||
321 | |||
322 | ecochk = I915_READ(GAM_ECOCHK); | ||
323 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | | ||
324 | ECOCHK_PPGTT_CACHE64B); | ||
325 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
326 | } else if (INTEL_INFO(dev)->gen >= 7) { | ||
327 | I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); | ||
328 | /* GFX_MODE is per-ring on gen7+ */ | ||
329 | } | ||
330 | |||
331 | for_each_ring(ring, dev_priv, i) { | ||
332 | if (INTEL_INFO(dev)->gen >= 7) | ||
333 | I915_WRITE(RING_MODE_GEN7(ring), | ||
334 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
335 | |||
336 | I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); | ||
337 | I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); | ||
266 | } | 338 | } |
267 | } | 339 | } |
268 | 340 | ||
@@ -288,13 +360,38 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) | |||
288 | dev_priv->mm.interruptible = interruptible; | 360 | dev_priv->mm.interruptible = interruptible; |
289 | } | 361 | } |
290 | 362 | ||
363 | |||
364 | static void i915_ggtt_clear_range(struct drm_device *dev, | ||
365 | unsigned first_entry, | ||
366 | unsigned num_entries) | ||
367 | { | ||
368 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
369 | gtt_pte_t scratch_pte; | ||
370 | volatile void __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry; | ||
371 | const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; | ||
372 | |||
373 | if (INTEL_INFO(dev)->gen < 6) { | ||
374 | intel_gtt_clear_range(first_entry, num_entries); | ||
375 | return; | ||
376 | } | ||
377 | |||
378 | if (WARN(num_entries > max_entries, | ||
379 | "First entry = %d; Num entries = %d (max=%d)\n", | ||
380 | first_entry, num_entries, max_entries)) | ||
381 | num_entries = max_entries; | ||
382 | |||
383 | scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC); | ||
384 | memset_io(gtt_base, scratch_pte, num_entries * sizeof(scratch_pte)); | ||
385 | readl(gtt_base); | ||
386 | } | ||
387 | |||
291 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 388 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
292 | { | 389 | { |
293 | struct drm_i915_private *dev_priv = dev->dev_private; | 390 | struct drm_i915_private *dev_priv = dev->dev_private; |
294 | struct drm_i915_gem_object *obj; | 391 | struct drm_i915_gem_object *obj; |
295 | 392 | ||
296 | /* First fill our portion of the GTT with scratch pages */ | 393 | /* First fill our portion of the GTT with scratch pages */ |
297 | intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, | 394 | i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE, |
298 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); | 395 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); |
299 | 396 | ||
300 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { | 397 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
@@ -302,7 +399,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) | |||
302 | i915_gem_gtt_bind_object(obj, obj->cache_level); | 399 | i915_gem_gtt_bind_object(obj, obj->cache_level); |
303 | } | 400 | } |
304 | 401 | ||
305 | intel_gtt_chipset_flush(); | 402 | i915_gem_chipset_flush(dev); |
306 | } | 403 | } |
307 | 404 | ||
308 | int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) | 405 | int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) |
@@ -318,21 +415,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) | |||
318 | return 0; | 415 | return 0; |
319 | } | 416 | } |
320 | 417 | ||
418 | /* | ||
419 | * Binds an object into the global gtt with the specified cache level. The object | ||
420 | * will be accessible to the GPU via commands whose operands reference offsets | ||
421 | * within the global GTT as well as accessible by the GPU through the GMADR | ||
422 | * mapped BAR (dev_priv->mm.gtt->gtt). | ||
423 | */ | ||
424 | static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, | ||
425 | enum i915_cache_level level) | ||
426 | { | ||
427 | struct drm_device *dev = obj->base.dev; | ||
428 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
429 | struct sg_table *st = obj->pages; | ||
430 | struct scatterlist *sg = st->sgl; | ||
431 | const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; | ||
432 | const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; | ||
433 | gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry; | ||
434 | int unused, i = 0; | ||
435 | unsigned int len, m = 0; | ||
436 | dma_addr_t addr; | ||
437 | |||
438 | for_each_sg(st->sgl, sg, st->nents, unused) { | ||
439 | len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
440 | for (m = 0; m < len; m++) { | ||
441 | addr = sg_dma_address(sg) + (m << PAGE_SHIFT); | ||
442 | iowrite32(pte_encode(dev, addr, level), >t_entries[i]); | ||
443 | i++; | ||
444 | } | ||
445 | } | ||
446 | |||
447 | BUG_ON(i > max_entries); | ||
448 | BUG_ON(i != obj->base.size / PAGE_SIZE); | ||
449 | |||
450 | /* XXX: This serves as a posting read to make sure that the PTE has | ||
451 | * actually been updated. There is some concern that even though | ||
452 | * registers and PTEs are within the same BAR that they are potentially | ||
453 | * of NUMA access patterns. Therefore, even with the way we assume | ||
454 | * hardware should work, we must keep this posting read for paranoia. | ||
455 | */ | ||
456 | if (i != 0) | ||
457 | WARN_ON(readl(>t_entries[i-1]) != pte_encode(dev, addr, level)); | ||
458 | |||
459 | /* This next bit makes the above posting read even more important. We | ||
460 | * want to flush the TLBs only after we're certain all the PTE updates | ||
461 | * have finished. | ||
462 | */ | ||
463 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | ||
464 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | ||
465 | } | ||
466 | |||
321 | void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, | 467 | void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, |
322 | enum i915_cache_level cache_level) | 468 | enum i915_cache_level cache_level) |
323 | { | 469 | { |
324 | struct drm_device *dev = obj->base.dev; | 470 | struct drm_device *dev = obj->base.dev; |
325 | unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); | 471 | if (INTEL_INFO(dev)->gen < 6) { |
472 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? | ||
473 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | ||
474 | intel_gtt_insert_sg_entries(obj->pages, | ||
475 | obj->gtt_space->start >> PAGE_SHIFT, | ||
476 | flags); | ||
477 | } else { | ||
478 | gen6_ggtt_bind_object(obj, cache_level); | ||
479 | } | ||
326 | 480 | ||
327 | intel_gtt_insert_sg_entries(obj->pages, | ||
328 | obj->gtt_space->start >> PAGE_SHIFT, | ||
329 | agp_type); | ||
330 | obj->has_global_gtt_mapping = 1; | 481 | obj->has_global_gtt_mapping = 1; |
331 | } | 482 | } |
332 | 483 | ||
333 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | 484 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
334 | { | 485 | { |
335 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, | 486 | i915_ggtt_clear_range(obj->base.dev, |
487 | obj->gtt_space->start >> PAGE_SHIFT, | ||
336 | obj->base.size >> PAGE_SHIFT); | 488 | obj->base.size >> PAGE_SHIFT); |
337 | 489 | ||
338 | obj->has_global_gtt_mapping = 0; | 490 | obj->has_global_gtt_mapping = 0; |
@@ -390,5 +542,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev, | |||
390 | dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; | 542 | dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; |
391 | 543 | ||
392 | /* ... but ensure that we clear the entire range. */ | 544 | /* ... but ensure that we clear the entire range. */ |
393 | intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); | 545 | i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE); |
546 | } | ||
547 | |||
548 | static int setup_scratch_page(struct drm_device *dev) | ||
549 | { | ||
550 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
551 | struct page *page; | ||
552 | dma_addr_t dma_addr; | ||
553 | |||
554 | page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); | ||
555 | if (page == NULL) | ||
556 | return -ENOMEM; | ||
557 | get_page(page); | ||
558 | set_pages_uc(page, 1); | ||
559 | |||
560 | #ifdef CONFIG_INTEL_IOMMU | ||
561 | dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, | ||
562 | PCI_DMA_BIDIRECTIONAL); | ||
563 | if (pci_dma_mapping_error(dev->pdev, dma_addr)) | ||
564 | return -EINVAL; | ||
565 | #else | ||
566 | dma_addr = page_to_phys(page); | ||
567 | #endif | ||
568 | dev_priv->mm.gtt->scratch_page = page; | ||
569 | dev_priv->mm.gtt->scratch_page_dma = dma_addr; | ||
570 | |||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | static void teardown_scratch_page(struct drm_device *dev) | ||
575 | { | ||
576 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
577 | set_pages_wb(dev_priv->mm.gtt->scratch_page, 1); | ||
578 | pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma, | ||
579 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
580 | put_page(dev_priv->mm.gtt->scratch_page); | ||
581 | __free_page(dev_priv->mm.gtt->scratch_page); | ||
582 | } | ||
583 | |||
584 | static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) | ||
585 | { | ||
586 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; | ||
587 | snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; | ||
588 | return snb_gmch_ctl << 20; | ||
589 | } | ||
590 | |||
591 | static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl) | ||
592 | { | ||
593 | snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; | ||
594 | snb_gmch_ctl &= SNB_GMCH_GMS_MASK; | ||
595 | return snb_gmch_ctl << 25; /* 32 MB units */ | ||
596 | } | ||
597 | |||
598 | static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) | ||
599 | { | ||
600 | static const int stolen_decoder[] = { | ||
601 | 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; | ||
602 | snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT; | ||
603 | snb_gmch_ctl &= IVB_GMCH_GMS_MASK; | ||
604 | return stolen_decoder[snb_gmch_ctl] << 20; | ||
605 | } | ||
606 | |||
607 | int i915_gem_gtt_init(struct drm_device *dev) | ||
608 | { | ||
609 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
610 | phys_addr_t gtt_bus_addr; | ||
611 | u16 snb_gmch_ctl; | ||
612 | u32 tmp; | ||
613 | int ret; | ||
614 | |||
615 | /* On modern platforms we need not worry ourself with the legacy | ||
616 | * hostbridge query stuff. Skip it entirely | ||
617 | */ | ||
618 | if (INTEL_INFO(dev)->gen < 6) { | ||
619 | ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); | ||
620 | if (!ret) { | ||
621 | DRM_ERROR("failed to set up gmch\n"); | ||
622 | return -EIO; | ||
623 | } | ||
624 | |||
625 | dev_priv->mm.gtt = intel_gtt_get(); | ||
626 | if (!dev_priv->mm.gtt) { | ||
627 | DRM_ERROR("Failed to initialize GTT\n"); | ||
628 | intel_gmch_remove(); | ||
629 | return -ENODEV; | ||
630 | } | ||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL); | ||
635 | if (!dev_priv->mm.gtt) | ||
636 | return -ENOMEM; | ||
637 | |||
638 | if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) | ||
639 | pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); | ||
640 | |||
641 | pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp); | ||
642 | /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ | ||
643 | gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20); | ||
644 | |||
645 | pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp); | ||
646 | dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK; | ||
647 | |||
648 | /* i9xx_setup */ | ||
649 | pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
650 | dev_priv->mm.gtt->gtt_total_entries = | ||
651 | gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t); | ||
652 | if (INTEL_INFO(dev)->gen < 7) | ||
653 | dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); | ||
654 | else | ||
655 | dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl); | ||
656 | |||
657 | dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT; | ||
658 | /* 64/512MB is the current min/max we actually know of, but this is just a | ||
659 | * coarse sanity check. | ||
660 | */ | ||
661 | if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 || | ||
662 | dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) { | ||
663 | DRM_ERROR("Unknown GMADR entries (%d)\n", | ||
664 | dev_priv->mm.gtt->gtt_mappable_entries); | ||
665 | ret = -ENXIO; | ||
666 | goto err_out; | ||
667 | } | ||
668 | |||
669 | ret = setup_scratch_page(dev); | ||
670 | if (ret) { | ||
671 | DRM_ERROR("Scratch setup failed\n"); | ||
672 | goto err_out; | ||
673 | } | ||
674 | |||
675 | dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr, | ||
676 | dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); | ||
677 | if (!dev_priv->mm.gtt->gtt) { | ||
678 | DRM_ERROR("Failed to map the gtt page table\n"); | ||
679 | teardown_scratch_page(dev); | ||
680 | ret = -ENOMEM; | ||
681 | goto err_out; | ||
682 | } | ||
683 | |||
684 | /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */ | ||
685 | DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8); | ||
686 | DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8); | ||
687 | DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20); | ||
688 | |||
689 | return 0; | ||
690 | |||
691 | err_out: | ||
692 | kfree(dev_priv->mm.gtt); | ||
693 | if (INTEL_INFO(dev)->gen < 6) | ||
694 | intel_gmch_remove(); | ||
695 | return ret; | ||
696 | } | ||
697 | |||
698 | void i915_gem_gtt_fini(struct drm_device *dev) | ||
699 | { | ||
700 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
701 | iounmap(dev_priv->mm.gtt->gtt); | ||
702 | teardown_scratch_page(dev); | ||
703 | if (INTEL_INFO(dev)->gen < 6) | ||
704 | intel_gmch_remove(); | ||
705 | kfree(dev_priv->mm.gtt); | ||
394 | } | 706 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 32e1bda865b8..2604867e6b7d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -122,7 +122,10 @@ static int | |||
122 | i915_pipe_enabled(struct drm_device *dev, int pipe) | 122 | i915_pipe_enabled(struct drm_device *dev, int pipe) |
123 | { | 123 | { |
124 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 124 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
125 | return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; | 125 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
126 | pipe); | ||
127 | |||
128 | return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; | ||
126 | } | 129 | } |
127 | 130 | ||
128 | /* Called from drm generic code, passed a 'crtc', which | 131 | /* Called from drm generic code, passed a 'crtc', which |
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
182 | int vbl_start, vbl_end, htotal, vtotal; | 185 | int vbl_start, vbl_end, htotal, vtotal; |
183 | bool in_vbl = true; | 186 | bool in_vbl = true; |
184 | int ret = 0; | 187 | int ret = 0; |
188 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
189 | pipe); | ||
185 | 190 | ||
186 | if (!i915_pipe_enabled(dev, pipe)) { | 191 | if (!i915_pipe_enabled(dev, pipe)) { |
187 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | 192 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " |
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
190 | } | 195 | } |
191 | 196 | ||
192 | /* Get vtotal. */ | 197 | /* Get vtotal. */ |
193 | vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); | 198 | vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); |
194 | 199 | ||
195 | if (INTEL_INFO(dev)->gen >= 4) { | 200 | if (INTEL_INFO(dev)->gen >= 4) { |
196 | /* No obvious pixelcount register. Only query vertical | 201 | /* No obvious pixelcount register. Only query vertical |
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
210 | */ | 215 | */ |
211 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; | 216 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; |
212 | 217 | ||
213 | htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); | 218 | htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); |
214 | *vpos = position / htotal; | 219 | *vpos = position / htotal; |
215 | *hpos = position - (*vpos * htotal); | 220 | *hpos = position - (*vpos * htotal); |
216 | } | 221 | } |
217 | 222 | ||
218 | /* Query vblank area. */ | 223 | /* Query vblank area. */ |
219 | vbl = I915_READ(VBLANK(pipe)); | 224 | vbl = I915_READ(VBLANK(cpu_transcoder)); |
220 | 225 | ||
221 | /* Test position against vblank region. */ | 226 | /* Test position against vblank region. */ |
222 | vbl_start = vbl & 0x1fff; | 227 | vbl_start = vbl & 0x1fff; |
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev, | |||
352 | if (i915_enable_hangcheck) { | 357 | if (i915_enable_hangcheck) { |
353 | dev_priv->hangcheck_count = 0; | 358 | dev_priv->hangcheck_count = 0; |
354 | mod_timer(&dev_priv->hangcheck_timer, | 359 | mod_timer(&dev_priv->hangcheck_timer, |
355 | jiffies + | 360 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
356 | msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | ||
357 | } | 361 | } |
358 | } | 362 | } |
359 | 363 | ||
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
374 | if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) | 378 | if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) |
375 | return; | 379 | return; |
376 | 380 | ||
377 | mutex_lock(&dev_priv->dev->struct_mutex); | 381 | mutex_lock(&dev_priv->rps.hw_lock); |
378 | 382 | ||
379 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) | 383 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) |
380 | new_delay = dev_priv->rps.cur_delay + 1; | 384 | new_delay = dev_priv->rps.cur_delay + 1; |
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
389 | gen6_set_rps(dev_priv->dev, new_delay); | 393 | gen6_set_rps(dev_priv->dev, new_delay); |
390 | } | 394 | } |
391 | 395 | ||
392 | mutex_unlock(&dev_priv->dev->struct_mutex); | 396 | mutex_unlock(&dev_priv->rps.hw_lock); |
393 | } | 397 | } |
394 | 398 | ||
395 | 399 | ||
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
405 | static void ivybridge_parity_work(struct work_struct *work) | 409 | static void ivybridge_parity_work(struct work_struct *work) |
406 | { | 410 | { |
407 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 411 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
408 | parity_error_work); | 412 | l3_parity.error_work); |
409 | u32 error_status, row, bank, subbank; | 413 | u32 error_status, row, bank, subbank; |
410 | char *parity_event[5]; | 414 | char *parity_event[5]; |
411 | uint32_t misccpctl; | 415 | uint32_t misccpctl; |
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev) | |||
469 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 473 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
470 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 474 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
471 | 475 | ||
472 | queue_work(dev_priv->wq, &dev_priv->parity_error_work); | 476 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); |
473 | } | 477 | } |
474 | 478 | ||
475 | static void snb_gt_irq_handler(struct drm_device *dev, | 479 | static void snb_gt_irq_handler(struct drm_device *dev, |
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, | |||
520 | queue_work(dev_priv->wq, &dev_priv->rps.work); | 524 | queue_work(dev_priv->wq, &dev_priv->rps.work); |
521 | } | 525 | } |
522 | 526 | ||
523 | static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) | 527 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
524 | { | 528 | { |
525 | struct drm_device *dev = (struct drm_device *) arg; | 529 | struct drm_device *dev = (struct drm_device *) arg; |
526 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 530 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
606 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 610 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
607 | int pipe; | 611 | int pipe; |
608 | 612 | ||
613 | if (pch_iir & SDE_HOTPLUG_MASK) | ||
614 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
615 | |||
609 | if (pch_iir & SDE_AUDIO_POWER_MASK) | 616 | if (pch_iir & SDE_AUDIO_POWER_MASK) |
610 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | 617 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
611 | (pch_iir & SDE_AUDIO_POWER_MASK) >> | 618 | (pch_iir & SDE_AUDIO_POWER_MASK) >> |
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
646 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 653 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
647 | int pipe; | 654 | int pipe; |
648 | 655 | ||
656 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | ||
657 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
658 | |||
649 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) | 659 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) |
650 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | 660 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
651 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | 661 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> |
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
670 | I915_READ(FDI_RX_IIR(pipe))); | 680 | I915_READ(FDI_RX_IIR(pipe))); |
671 | } | 681 | } |
672 | 682 | ||
673 | static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | 683 | static irqreturn_t ivybridge_irq_handler(int irq, void *arg) |
674 | { | 684 | { |
675 | struct drm_device *dev = (struct drm_device *) arg; | 685 | struct drm_device *dev = (struct drm_device *) arg; |
676 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 686 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | |||
709 | if (de_iir & DE_PCH_EVENT_IVB) { | 719 | if (de_iir & DE_PCH_EVENT_IVB) { |
710 | u32 pch_iir = I915_READ(SDEIIR); | 720 | u32 pch_iir = I915_READ(SDEIIR); |
711 | 721 | ||
712 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | ||
713 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
714 | cpt_irq_handler(dev, pch_iir); | 722 | cpt_irq_handler(dev, pch_iir); |
715 | 723 | ||
716 | /* clear PCH hotplug event before clear CPU irq */ | 724 | /* clear PCH hotplug event before clear CPU irq */ |
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev, | |||
745 | notify_ring(dev, &dev_priv->ring[VCS]); | 753 | notify_ring(dev, &dev_priv->ring[VCS]); |
746 | } | 754 | } |
747 | 755 | ||
748 | static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | 756 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
749 | { | 757 | { |
750 | struct drm_device *dev = (struct drm_device *) arg; | 758 | struct drm_device *dev = (struct drm_device *) arg; |
751 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 759 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
752 | int ret = IRQ_NONE; | 760 | int ret = IRQ_NONE; |
753 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; | 761 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; |
754 | u32 hotplug_mask; | ||
755 | 762 | ||
756 | atomic_inc(&dev_priv->irq_received); | 763 | atomic_inc(&dev_priv->irq_received); |
757 | 764 | ||
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
769 | (!IS_GEN6(dev) || pm_iir == 0)) | 776 | (!IS_GEN6(dev) || pm_iir == 0)) |
770 | goto done; | 777 | goto done; |
771 | 778 | ||
772 | if (HAS_PCH_CPT(dev)) | ||
773 | hotplug_mask = SDE_HOTPLUG_MASK_CPT; | ||
774 | else | ||
775 | hotplug_mask = SDE_HOTPLUG_MASK; | ||
776 | |||
777 | ret = IRQ_HANDLED; | 779 | ret = IRQ_HANDLED; |
778 | 780 | ||
779 | if (IS_GEN5(dev)) | 781 | if (IS_GEN5(dev)) |
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
802 | 804 | ||
803 | /* check event from PCH */ | 805 | /* check event from PCH */ |
804 | if (de_iir & DE_PCH_EVENT) { | 806 | if (de_iir & DE_PCH_EVENT) { |
805 | if (pch_iir & hotplug_mask) | ||
806 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
807 | if (HAS_PCH_CPT(dev)) | 807 | if (HAS_PCH_CPT(dev)) |
808 | cpt_irq_handler(dev, pch_iir); | 808 | cpt_irq_handler(dev, pch_iir); |
809 | else | 809 | else |
@@ -1751,7 +1751,7 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1751 | repeat: | 1751 | repeat: |
1752 | /* Reset timer case chip hangs without another request being added */ | 1752 | /* Reset timer case chip hangs without another request being added */ |
1753 | mod_timer(&dev_priv->hangcheck_timer, | 1753 | mod_timer(&dev_priv->hangcheck_timer, |
1754 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 1754 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
1755 | } | 1755 | } |
1756 | 1756 | ||
1757 | /* drm_dma.h hooks | 1757 | /* drm_dma.h hooks |
@@ -1956,6 +1956,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev) | |||
1956 | u32 enable_mask; | 1956 | u32 enable_mask; |
1957 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1957 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
1958 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; | 1958 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
1959 | u32 render_irqs; | ||
1959 | u16 msid; | 1960 | u16 msid; |
1960 | 1961 | ||
1961 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; | 1962 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; |
@@ -1995,21 +1996,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev) | |||
1995 | I915_WRITE(VLV_IIR, 0xffffffff); | 1996 | I915_WRITE(VLV_IIR, 0xffffffff); |
1996 | I915_WRITE(VLV_IIR, 0xffffffff); | 1997 | I915_WRITE(VLV_IIR, 0xffffffff); |
1997 | 1998 | ||
1998 | dev_priv->gt_irq_mask = ~0; | ||
1999 | |||
2000 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | ||
2001 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1999 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
2002 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 2000 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
2003 | I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | | 2001 | |
2004 | GT_GEN6_BLT_CS_ERROR_INTERRUPT | | 2002 | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | |
2005 | GT_GEN6_BLT_USER_INTERRUPT | | 2003 | GEN6_BLITTER_USER_INTERRUPT; |
2006 | GT_GEN6_BSD_USER_INTERRUPT | | 2004 | I915_WRITE(GTIER, render_irqs); |
2007 | GT_GEN6_BSD_CS_ERROR_INTERRUPT | | ||
2008 | GT_GEN7_L3_PARITY_ERROR_INTERRUPT | | ||
2009 | GT_PIPE_NOTIFY | | ||
2010 | GT_RENDER_CS_ERROR_INTERRUPT | | ||
2011 | GT_SYNC_STATUS | | ||
2012 | GT_USER_INTERRUPT); | ||
2013 | POSTING_READ(GTIER); | 2005 | POSTING_READ(GTIER); |
2014 | 2006 | ||
2015 | /* ack & enable invalid PTE error interrupts */ | 2007 | /* ack & enable invalid PTE error interrupts */ |
@@ -2019,7 +2011,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev) | |||
2019 | #endif | 2011 | #endif |
2020 | 2012 | ||
2021 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | 2013 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); |
2022 | #if 0 /* FIXME: check register definitions; some have moved */ | ||
2023 | /* Note HDMI and DP share bits */ | 2014 | /* Note HDMI and DP share bits */ |
2024 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | 2015 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
2025 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | 2016 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; |
@@ -2027,15 +2018,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev) | |||
2027 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | 2018 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; |
2028 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | 2019 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) |
2029 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | 2020 | hotplug_en |= HDMID_HOTPLUG_INT_EN; |
2030 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | 2021 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) |
2031 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | 2022 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
2032 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | 2023 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) |
2033 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | 2024 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; |
2034 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | 2025 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { |
2035 | hotplug_en |= CRT_HOTPLUG_INT_EN; | 2026 | hotplug_en |= CRT_HOTPLUG_INT_EN; |
2036 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | 2027 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
2037 | } | 2028 | } |
2038 | #endif | ||
2039 | 2029 | ||
2040 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 2030 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
2041 | 2031 | ||
@@ -2129,7 +2119,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev) | |||
2129 | return 0; | 2119 | return 0; |
2130 | } | 2120 | } |
2131 | 2121 | ||
2132 | static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS) | 2122 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
2133 | { | 2123 | { |
2134 | struct drm_device *dev = (struct drm_device *) arg; | 2124 | struct drm_device *dev = (struct drm_device *) arg; |
2135 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2125 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -2307,7 +2297,7 @@ static int i915_irq_postinstall(struct drm_device *dev) | |||
2307 | return 0; | 2297 | return 0; |
2308 | } | 2298 | } |
2309 | 2299 | ||
2310 | static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) | 2300 | static irqreturn_t i915_irq_handler(int irq, void *arg) |
2311 | { | 2301 | { |
2312 | struct drm_device *dev = (struct drm_device *) arg; | 2302 | struct drm_device *dev = (struct drm_device *) arg; |
2313 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2303 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -2545,7 +2535,7 @@ static int i965_irq_postinstall(struct drm_device *dev) | |||
2545 | return 0; | 2535 | return 0; |
2546 | } | 2536 | } |
2547 | 2537 | ||
2548 | static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) | 2538 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
2549 | { | 2539 | { |
2550 | struct drm_device *dev = (struct drm_device *) arg; | 2540 | struct drm_device *dev = (struct drm_device *) arg; |
2551 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2541 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -2691,7 +2681,7 @@ void intel_irq_init(struct drm_device *dev) | |||
2691 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 2681 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
2692 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | 2682 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); |
2693 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); | 2683 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
2694 | INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); | 2684 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
2695 | 2685 | ||
2696 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 2686 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
2697 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 2687 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a4162ddff6c5..9118bd112589 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #define _I915_REG_H_ | 26 | #define _I915_REG_H_ |
27 | 27 | ||
28 | #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) | 28 | #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) |
29 | #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) | ||
29 | 30 | ||
30 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) | 31 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) |
31 | 32 | ||
@@ -40,6 +41,14 @@ | |||
40 | */ | 41 | */ |
41 | #define INTEL_GMCH_CTRL 0x52 | 42 | #define INTEL_GMCH_CTRL 0x52 |
42 | #define INTEL_GMCH_VGA_DISABLE (1 << 1) | 43 | #define INTEL_GMCH_VGA_DISABLE (1 << 1) |
44 | #define SNB_GMCH_CTRL 0x50 | ||
45 | #define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ | ||
46 | #define SNB_GMCH_GGMS_MASK 0x3 | ||
47 | #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ | ||
48 | #define SNB_GMCH_GMS_MASK 0x1f | ||
49 | #define IVB_GMCH_GMS_SHIFT 4 | ||
50 | #define IVB_GMCH_GMS_MASK 0xf | ||
51 | |||
43 | 52 | ||
44 | /* PCI config space */ | 53 | /* PCI config space */ |
45 | 54 | ||
@@ -105,23 +114,6 @@ | |||
105 | #define GEN6_GRDOM_MEDIA (1 << 2) | 114 | #define GEN6_GRDOM_MEDIA (1 << 2) |
106 | #define GEN6_GRDOM_BLT (1 << 3) | 115 | #define GEN6_GRDOM_BLT (1 << 3) |
107 | 116 | ||
108 | /* PPGTT stuff */ | ||
109 | #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) | ||
110 | |||
111 | #define GEN6_PDE_VALID (1 << 0) | ||
112 | #define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */ | ||
113 | /* gen6+ has bit 11-4 for physical addr bit 39-32 */ | ||
114 | #define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | ||
115 | |||
116 | #define GEN6_PTE_VALID (1 << 0) | ||
117 | #define GEN6_PTE_UNCACHED (1 << 1) | ||
118 | #define HSW_PTE_UNCACHED (0) | ||
119 | #define GEN6_PTE_CACHE_LLC (2 << 1) | ||
120 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) | ||
121 | #define GEN6_PTE_CACHE_BITS (3 << 1) | ||
122 | #define GEN6_PTE_GFDT (1 << 3) | ||
123 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | ||
124 | |||
125 | #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) | 117 | #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) |
126 | #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) | 118 | #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) |
127 | #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) | 119 | #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) |
@@ -241,11 +233,18 @@ | |||
241 | */ | 233 | */ |
242 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) | 234 | #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) |
243 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ | 235 | #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ |
244 | #define MI_INVALIDATE_TLB (1<<18) | 236 | #define MI_FLUSH_DW_STORE_INDEX (1<<21) |
245 | #define MI_INVALIDATE_BSD (1<<7) | 237 | #define MI_INVALIDATE_TLB (1<<18) |
238 | #define MI_FLUSH_DW_OP_STOREDW (1<<14) | ||
239 | #define MI_INVALIDATE_BSD (1<<7) | ||
240 | #define MI_FLUSH_DW_USE_GTT (1<<2) | ||
241 | #define MI_FLUSH_DW_USE_PPGTT (0<<2) | ||
246 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) | 242 | #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) |
247 | #define MI_BATCH_NON_SECURE (1) | 243 | #define MI_BATCH_NON_SECURE (1) |
248 | #define MI_BATCH_NON_SECURE_I965 (1<<8) | 244 | /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ |
245 | #define MI_BATCH_NON_SECURE_I965 (1<<8) | ||
246 | #define MI_BATCH_PPGTT_HSW (1<<8) | ||
247 | #define MI_BATCH_NON_SECURE_HSW (1<<13) | ||
249 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) | 248 | #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) |
250 | #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ | 249 | #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ |
251 | #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ | 250 | #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ |
@@ -369,6 +368,7 @@ | |||
369 | #define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ | 368 | #define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ |
370 | #define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ | 369 | #define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ |
371 | #define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ | 370 | #define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ |
371 | #define DPIO_PLL_REFCLK_SEL_MASK 3 | ||
372 | #define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ | 372 | #define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ |
373 | #define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ | 373 | #define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ |
374 | #define _DPIO_REFSFR_B 0x8034 | 374 | #define _DPIO_REFSFR_B 0x8034 |
@@ -384,6 +384,9 @@ | |||
384 | 384 | ||
385 | #define DPIO_FASTCLK_DISABLE 0x8100 | 385 | #define DPIO_FASTCLK_DISABLE 0x8100 |
386 | 386 | ||
387 | #define DPIO_DATA_CHANNEL1 0x8220 | ||
388 | #define DPIO_DATA_CHANNEL2 0x8420 | ||
389 | |||
387 | /* | 390 | /* |
388 | * Fence registers | 391 | * Fence registers |
389 | */ | 392 | */ |
@@ -521,6 +524,7 @@ | |||
521 | */ | 524 | */ |
522 | # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) | 525 | # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) |
523 | #define _3D_CHICKEN3 0x02090 | 526 | #define _3D_CHICKEN3 0x02090 |
527 | #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) | ||
524 | #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) | 528 | #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) |
525 | 529 | ||
526 | #define MI_MODE 0x0209c | 530 | #define MI_MODE 0x0209c |
@@ -547,6 +551,8 @@ | |||
547 | #define IIR 0x020a4 | 551 | #define IIR 0x020a4 |
548 | #define IMR 0x020a8 | 552 | #define IMR 0x020a8 |
549 | #define ISR 0x020ac | 553 | #define ISR 0x020ac |
554 | #define VLV_GUNIT_CLOCK_GATE 0x182060 | ||
555 | #define GCFG_DIS (1<<8) | ||
550 | #define VLV_IIR_RW 0x182084 | 556 | #define VLV_IIR_RW 0x182084 |
551 | #define VLV_IER 0x1820a0 | 557 | #define VLV_IER 0x1820a0 |
552 | #define VLV_IIR 0x1820a4 | 558 | #define VLV_IIR 0x1820a4 |
@@ -661,6 +667,7 @@ | |||
661 | #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ | 667 | #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ |
662 | 668 | ||
663 | #define CACHE_MODE_0 0x02120 /* 915+ only */ | 669 | #define CACHE_MODE_0 0x02120 /* 915+ only */ |
670 | #define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) | ||
664 | #define CM0_IZ_OPT_DISABLE (1<<6) | 671 | #define CM0_IZ_OPT_DISABLE (1<<6) |
665 | #define CM0_ZR_OPT_DISABLE (1<<5) | 672 | #define CM0_ZR_OPT_DISABLE (1<<5) |
666 | #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) | 673 | #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) |
@@ -670,6 +677,8 @@ | |||
670 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) | 677 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
671 | #define BB_ADDR 0x02140 /* 8 bytes */ | 678 | #define BB_ADDR 0x02140 /* 8 bytes */ |
672 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ | 679 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
680 | #define GFX_FLSH_CNTL_GEN6 0x101008 | ||
681 | #define GFX_FLSH_CNTL_EN (1<<0) | ||
673 | #define ECOSKPD 0x021d0 | 682 | #define ECOSKPD 0x021d0 |
674 | #define ECO_GATING_CX_ONLY (1<<3) | 683 | #define ECO_GATING_CX_ONLY (1<<3) |
675 | #define ECO_FLIP_DONE (1<<0) | 684 | #define ECO_FLIP_DONE (1<<0) |
@@ -1559,14 +1568,14 @@ | |||
1559 | #define _VSYNCSHIFT_B 0x61028 | 1568 | #define _VSYNCSHIFT_B 0x61028 |
1560 | 1569 | ||
1561 | 1570 | ||
1562 | #define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) | 1571 | #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) |
1563 | #define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) | 1572 | #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) |
1564 | #define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) | 1573 | #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) |
1565 | #define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) | 1574 | #define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) |
1566 | #define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) | 1575 | #define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) |
1567 | #define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) | 1576 | #define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) |
1568 | #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) | 1577 | #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) |
1569 | #define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) | 1578 | #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) |
1570 | 1579 | ||
1571 | /* VGA port control */ | 1580 | /* VGA port control */ |
1572 | #define ADPA 0x61100 | 1581 | #define ADPA 0x61100 |
@@ -2641,6 +2650,7 @@ | |||
2641 | #define PIPECONF_GAMMA (1<<24) | 2650 | #define PIPECONF_GAMMA (1<<24) |
2642 | #define PIPECONF_FORCE_BORDER (1<<25) | 2651 | #define PIPECONF_FORCE_BORDER (1<<25) |
2643 | #define PIPECONF_INTERLACE_MASK (7 << 21) | 2652 | #define PIPECONF_INTERLACE_MASK (7 << 21) |
2653 | #define PIPECONF_INTERLACE_MASK_HSW (3 << 21) | ||
2644 | /* Note that pre-gen3 does not support interlaced display directly. Panel | 2654 | /* Note that pre-gen3 does not support interlaced display directly. Panel |
2645 | * fitting must be disabled on pre-ilk for interlaced. */ | 2655 | * fitting must be disabled on pre-ilk for interlaced. */ |
2646 | #define PIPECONF_PROGRESSIVE (0 << 21) | 2656 | #define PIPECONF_PROGRESSIVE (0 << 21) |
@@ -2711,7 +2721,7 @@ | |||
2711 | #define PIPE_12BPC (3 << 5) | 2721 | #define PIPE_12BPC (3 << 5) |
2712 | 2722 | ||
2713 | #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) | 2723 | #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) |
2714 | #define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) | 2724 | #define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) |
2715 | #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) | 2725 | #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) |
2716 | #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) | 2726 | #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) |
2717 | #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) | 2727 | #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) |
@@ -2998,12 +3008,19 @@ | |||
2998 | #define DISPPLANE_GAMMA_ENABLE (1<<30) | 3008 | #define DISPPLANE_GAMMA_ENABLE (1<<30) |
2999 | #define DISPPLANE_GAMMA_DISABLE 0 | 3009 | #define DISPPLANE_GAMMA_DISABLE 0 |
3000 | #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) | 3010 | #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) |
3011 | #define DISPPLANE_YUV422 (0x0<<26) | ||
3001 | #define DISPPLANE_8BPP (0x2<<26) | 3012 | #define DISPPLANE_8BPP (0x2<<26) |
3002 | #define DISPPLANE_15_16BPP (0x4<<26) | 3013 | #define DISPPLANE_BGRA555 (0x3<<26) |
3003 | #define DISPPLANE_16BPP (0x5<<26) | 3014 | #define DISPPLANE_BGRX555 (0x4<<26) |
3004 | #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) | 3015 | #define DISPPLANE_BGRX565 (0x5<<26) |
3005 | #define DISPPLANE_32BPP (0x7<<26) | 3016 | #define DISPPLANE_BGRX888 (0x6<<26) |
3006 | #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) | 3017 | #define DISPPLANE_BGRA888 (0x7<<26) |
3018 | #define DISPPLANE_RGBX101010 (0x8<<26) | ||
3019 | #define DISPPLANE_RGBA101010 (0x9<<26) | ||
3020 | #define DISPPLANE_BGRX101010 (0xa<<26) | ||
3021 | #define DISPPLANE_RGBX161616 (0xc<<26) | ||
3022 | #define DISPPLANE_RGBX888 (0xe<<26) | ||
3023 | #define DISPPLANE_RGBA888 (0xf<<26) | ||
3007 | #define DISPPLANE_STEREO_ENABLE (1<<25) | 3024 | #define DISPPLANE_STEREO_ENABLE (1<<25) |
3008 | #define DISPPLANE_STEREO_DISABLE 0 | 3025 | #define DISPPLANE_STEREO_DISABLE 0 |
3009 | #define DISPPLANE_SEL_PIPE_SHIFT 24 | 3026 | #define DISPPLANE_SEL_PIPE_SHIFT 24 |
@@ -3024,6 +3041,8 @@ | |||
3024 | #define _DSPASIZE 0x70190 | 3041 | #define _DSPASIZE 0x70190 |
3025 | #define _DSPASURF 0x7019C /* 965+ only */ | 3042 | #define _DSPASURF 0x7019C /* 965+ only */ |
3026 | #define _DSPATILEOFF 0x701A4 /* 965+ only */ | 3043 | #define _DSPATILEOFF 0x701A4 /* 965+ only */ |
3044 | #define _DSPAOFFSET 0x701A4 /* HSW */ | ||
3045 | #define _DSPASURFLIVE 0x701AC | ||
3027 | 3046 | ||
3028 | #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) | 3047 | #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) |
3029 | #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) | 3048 | #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) |
@@ -3033,6 +3052,8 @@ | |||
3033 | #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) | 3052 | #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) |
3034 | #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) | 3053 | #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) |
3035 | #define DSPLINOFF(plane) DSPADDR(plane) | 3054 | #define DSPLINOFF(plane) DSPADDR(plane) |
3055 | #define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) | ||
3056 | #define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) | ||
3036 | 3057 | ||
3037 | /* Display/Sprite base address macros */ | 3058 | /* Display/Sprite base address macros */ |
3038 | #define DISP_BASEADDR_MASK (0xfffff000) | 3059 | #define DISP_BASEADDR_MASK (0xfffff000) |
@@ -3078,6 +3099,8 @@ | |||
3078 | #define _DSPBSIZE 0x71190 | 3099 | #define _DSPBSIZE 0x71190 |
3079 | #define _DSPBSURF 0x7119C | 3100 | #define _DSPBSURF 0x7119C |
3080 | #define _DSPBTILEOFF 0x711A4 | 3101 | #define _DSPBTILEOFF 0x711A4 |
3102 | #define _DSPBOFFSET 0x711A4 | ||
3103 | #define _DSPBSURFLIVE 0x711AC | ||
3081 | 3104 | ||
3082 | /* Sprite A control */ | 3105 | /* Sprite A control */ |
3083 | #define _DVSACNTR 0x72180 | 3106 | #define _DVSACNTR 0x72180 |
@@ -3143,6 +3166,7 @@ | |||
3143 | #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) | 3166 | #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) |
3144 | #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) | 3167 | #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) |
3145 | #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) | 3168 | #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) |
3169 | #define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) | ||
3146 | 3170 | ||
3147 | #define _SPRA_CTL 0x70280 | 3171 | #define _SPRA_CTL 0x70280 |
3148 | #define SPRITE_ENABLE (1<<31) | 3172 | #define SPRITE_ENABLE (1<<31) |
@@ -3177,6 +3201,8 @@ | |||
3177 | #define _SPRA_SURF 0x7029c | 3201 | #define _SPRA_SURF 0x7029c |
3178 | #define _SPRA_KEYMAX 0x702a0 | 3202 | #define _SPRA_KEYMAX 0x702a0 |
3179 | #define _SPRA_TILEOFF 0x702a4 | 3203 | #define _SPRA_TILEOFF 0x702a4 |
3204 | #define _SPRA_OFFSET 0x702a4 | ||
3205 | #define _SPRA_SURFLIVE 0x702ac | ||
3180 | #define _SPRA_SCALE 0x70304 | 3206 | #define _SPRA_SCALE 0x70304 |
3181 | #define SPRITE_SCALE_ENABLE (1<<31) | 3207 | #define SPRITE_SCALE_ENABLE (1<<31) |
3182 | #define SPRITE_FILTER_MASK (3<<29) | 3208 | #define SPRITE_FILTER_MASK (3<<29) |
@@ -3197,6 +3223,8 @@ | |||
3197 | #define _SPRB_SURF 0x7129c | 3223 | #define _SPRB_SURF 0x7129c |
3198 | #define _SPRB_KEYMAX 0x712a0 | 3224 | #define _SPRB_KEYMAX 0x712a0 |
3199 | #define _SPRB_TILEOFF 0x712a4 | 3225 | #define _SPRB_TILEOFF 0x712a4 |
3226 | #define _SPRB_OFFSET 0x712a4 | ||
3227 | #define _SPRB_SURFLIVE 0x712ac | ||
3200 | #define _SPRB_SCALE 0x71304 | 3228 | #define _SPRB_SCALE 0x71304 |
3201 | #define _SPRB_GAMC 0x71400 | 3229 | #define _SPRB_GAMC 0x71400 |
3202 | 3230 | ||
@@ -3210,8 +3238,10 @@ | |||
3210 | #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) | 3238 | #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) |
3211 | #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) | 3239 | #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) |
3212 | #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) | 3240 | #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) |
3241 | #define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) | ||
3213 | #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) | 3242 | #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) |
3214 | #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) | 3243 | #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) |
3244 | #define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) | ||
3215 | 3245 | ||
3216 | /* VBIOS regs */ | 3246 | /* VBIOS regs */ |
3217 | #define VGACNTRL 0x71400 | 3247 | #define VGACNTRL 0x71400 |
@@ -3246,12 +3276,6 @@ | |||
3246 | #define DISPLAY_PORT_PLL_BIOS_1 0x46010 | 3276 | #define DISPLAY_PORT_PLL_BIOS_1 0x46010 |
3247 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 | 3277 | #define DISPLAY_PORT_PLL_BIOS_2 0x46014 |
3248 | 3278 | ||
3249 | #define PCH_DSPCLK_GATE_D 0x42020 | ||
3250 | # define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) | ||
3251 | # define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) | ||
3252 | # define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) | ||
3253 | # define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) | ||
3254 | |||
3255 | #define PCH_3DCGDIS0 0x46020 | 3279 | #define PCH_3DCGDIS0 0x46020 |
3256 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) | 3280 | # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) |
3257 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) | 3281 | # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) |
@@ -3301,14 +3325,14 @@ | |||
3301 | #define _PIPEB_LINK_M2 0x61048 | 3325 | #define _PIPEB_LINK_M2 0x61048 |
3302 | #define _PIPEB_LINK_N2 0x6104c | 3326 | #define _PIPEB_LINK_N2 0x6104c |
3303 | 3327 | ||
3304 | #define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) | 3328 | #define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) |
3305 | #define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) | 3329 | #define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) |
3306 | #define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) | 3330 | #define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) |
3307 | #define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) | 3331 | #define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) |
3308 | #define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) | 3332 | #define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) |
3309 | #define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) | 3333 | #define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1) |
3310 | #define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) | 3334 | #define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2) |
3311 | #define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) | 3335 | #define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2) |
3312 | 3336 | ||
3313 | /* CPU panel fitter */ | 3337 | /* CPU panel fitter */ |
3314 | /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ | 3338 | /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ |
@@ -3423,15 +3447,13 @@ | |||
3423 | #define ILK_HDCP_DISABLE (1<<25) | 3447 | #define ILK_HDCP_DISABLE (1<<25) |
3424 | #define ILK_eDP_A_DISABLE (1<<24) | 3448 | #define ILK_eDP_A_DISABLE (1<<24) |
3425 | #define ILK_DESKTOP (1<<23) | 3449 | #define ILK_DESKTOP (1<<23) |
3426 | #define ILK_DSPCLK_GATE 0x42020 | ||
3427 | #define IVB_VRHUNIT_CLK_GATE (1<<28) | ||
3428 | #define ILK_DPARB_CLK_GATE (1<<5) | ||
3429 | #define ILK_DPFD_CLK_GATE (1<<7) | ||
3430 | 3450 | ||
3431 | /* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ | 3451 | #define ILK_DSPCLK_GATE_D 0x42020 |
3432 | #define ILK_CLK_FBC (1<<7) | 3452 | #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) |
3433 | #define ILK_DPFC_DIS1 (1<<8) | 3453 | #define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) |
3434 | #define ILK_DPFC_DIS2 (1<<9) | 3454 | #define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) |
3455 | #define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) | ||
3456 | #define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) | ||
3435 | 3457 | ||
3436 | #define IVB_CHICKEN3 0x4200c | 3458 | #define IVB_CHICKEN3 0x4200c |
3437 | # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) | 3459 | # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) |
@@ -3447,14 +3469,21 @@ | |||
3447 | 3469 | ||
3448 | #define GEN7_L3CNTLREG1 0xB01C | 3470 | #define GEN7_L3CNTLREG1 0xB01C |
3449 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C | 3471 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C |
3472 | #define GEN7_L3AGDIS (1<<19) | ||
3450 | 3473 | ||
3451 | #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 | 3474 | #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 |
3452 | #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 | 3475 | #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 |
3453 | 3476 | ||
3477 | #define GEN7_L3SQCREG4 0xb034 | ||
3478 | #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) | ||
3479 | |||
3454 | /* WaCatErrorRejectionIssue */ | 3480 | /* WaCatErrorRejectionIssue */ |
3455 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 | 3481 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 |
3456 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) | 3482 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) |
3457 | 3483 | ||
3484 | #define HSW_FUSE_STRAP 0x42014 | ||
3485 | #define HSW_CDCLK_LIMIT (1 << 24) | ||
3486 | |||
3458 | /* PCH */ | 3487 | /* PCH */ |
3459 | 3488 | ||
3460 | /* south display engine interrupt: IBX */ | 3489 | /* south display engine interrupt: IBX */ |
@@ -3686,7 +3715,7 @@ | |||
3686 | #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) | 3715 | #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) |
3687 | #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) | 3716 | #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) |
3688 | 3717 | ||
3689 | #define VLV_VIDEO_DIP_CTL_A 0x60220 | 3718 | #define VLV_VIDEO_DIP_CTL_A 0x60200 |
3690 | #define VLV_VIDEO_DIP_DATA_A 0x60208 | 3719 | #define VLV_VIDEO_DIP_DATA_A 0x60208 |
3691 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 | 3720 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 |
3692 | 3721 | ||
@@ -3795,16 +3824,22 @@ | |||
3795 | #define TRANS_6BPC (2<<5) | 3824 | #define TRANS_6BPC (2<<5) |
3796 | #define TRANS_12BPC (3<<5) | 3825 | #define TRANS_12BPC (3<<5) |
3797 | 3826 | ||
3827 | #define _TRANSA_CHICKEN1 0xf0060 | ||
3828 | #define _TRANSB_CHICKEN1 0xf1060 | ||
3829 | #define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) | ||
3830 | #define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) | ||
3798 | #define _TRANSA_CHICKEN2 0xf0064 | 3831 | #define _TRANSA_CHICKEN2 0xf0064 |
3799 | #define _TRANSB_CHICKEN2 0xf1064 | 3832 | #define _TRANSB_CHICKEN2 0xf1064 |
3800 | #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) | 3833 | #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) |
3801 | #define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) | 3834 | #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) |
3835 | |||
3802 | 3836 | ||
3803 | #define SOUTH_CHICKEN1 0xc2000 | 3837 | #define SOUTH_CHICKEN1 0xc2000 |
3804 | #define FDIA_PHASE_SYNC_SHIFT_OVR 19 | 3838 | #define FDIA_PHASE_SYNC_SHIFT_OVR 19 |
3805 | #define FDIA_PHASE_SYNC_SHIFT_EN 18 | 3839 | #define FDIA_PHASE_SYNC_SHIFT_EN 18 |
3806 | #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) | 3840 | #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) |
3807 | #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) | 3841 | #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) |
3842 | #define FDI_BC_BIFURCATION_SELECT (1 << 12) | ||
3808 | #define SOUTH_CHICKEN2 0xc2004 | 3843 | #define SOUTH_CHICKEN2 0xc2004 |
3809 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) | 3844 | #define DPLS_EDP_PPS_FIX_DIS (1<<0) |
3810 | 3845 | ||
@@ -3901,16 +3936,21 @@ | |||
3901 | #define FDI_PORT_WIDTH_2X_LPT (1<<19) | 3936 | #define FDI_PORT_WIDTH_2X_LPT (1<<19) |
3902 | #define FDI_PORT_WIDTH_1X_LPT (0<<19) | 3937 | #define FDI_PORT_WIDTH_1X_LPT (0<<19) |
3903 | 3938 | ||
3904 | #define _FDI_RXA_MISC 0xf0010 | 3939 | #define _FDI_RXA_MISC 0xf0010 |
3905 | #define _FDI_RXB_MISC 0xf1010 | 3940 | #define _FDI_RXB_MISC 0xf1010 |
3941 | #define FDI_RX_PWRDN_LANE1_MASK (3<<26) | ||
3942 | #define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26) | ||
3943 | #define FDI_RX_PWRDN_LANE0_MASK (3<<24) | ||
3944 | #define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24) | ||
3945 | #define FDI_RX_TP1_TO_TP2_48 (2<<20) | ||
3946 | #define FDI_RX_TP1_TO_TP2_64 (3<<20) | ||
3947 | #define FDI_RX_FDI_DELAY_90 (0x90<<0) | ||
3948 | #define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) | ||
3949 | |||
3906 | #define _FDI_RXA_TUSIZE1 0xf0030 | 3950 | #define _FDI_RXA_TUSIZE1 0xf0030 |
3907 | #define _FDI_RXA_TUSIZE2 0xf0038 | 3951 | #define _FDI_RXA_TUSIZE2 0xf0038 |
3908 | #define _FDI_RXB_TUSIZE1 0xf1030 | 3952 | #define _FDI_RXB_TUSIZE1 0xf1030 |
3909 | #define _FDI_RXB_TUSIZE2 0xf1038 | 3953 | #define _FDI_RXB_TUSIZE2 0xf1038 |
3910 | #define FDI_RX_TP1_TO_TP2_48 (2<<20) | ||
3911 | #define FDI_RX_TP1_TO_TP2_64 (3<<20) | ||
3912 | #define FDI_RX_FDI_DELAY_90 (0x90<<0) | ||
3913 | #define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) | ||
3914 | #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) | 3954 | #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) |
3915 | #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) | 3955 | #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) |
3916 | 3956 | ||
@@ -4003,6 +4043,11 @@ | |||
4003 | #define PANEL_LIGHT_ON_DELAY_SHIFT 0 | 4043 | #define PANEL_LIGHT_ON_DELAY_SHIFT 0 |
4004 | 4044 | ||
4005 | #define PCH_PP_OFF_DELAYS 0xc720c | 4045 | #define PCH_PP_OFF_DELAYS 0xc720c |
4046 | #define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30) | ||
4047 | #define PANEL_POWER_PORT_LVDS (0 << 30) | ||
4048 | #define PANEL_POWER_PORT_DP_A (1 << 30) | ||
4049 | #define PANEL_POWER_PORT_DP_C (2 << 30) | ||
4050 | #define PANEL_POWER_PORT_DP_D (3 << 30) | ||
4006 | #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) | 4051 | #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) |
4007 | #define PANEL_POWER_DOWN_DELAY_SHIFT 16 | 4052 | #define PANEL_POWER_DOWN_DELAY_SHIFT 16 |
4008 | #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) | 4053 | #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) |
@@ -4050,7 +4095,7 @@ | |||
4050 | #define TRANS_DP_CTL_A 0xe0300 | 4095 | #define TRANS_DP_CTL_A 0xe0300 |
4051 | #define TRANS_DP_CTL_B 0xe1300 | 4096 | #define TRANS_DP_CTL_B 0xe1300 |
4052 | #define TRANS_DP_CTL_C 0xe2300 | 4097 | #define TRANS_DP_CTL_C 0xe2300 |
4053 | #define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) | 4098 | #define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) |
4054 | #define TRANS_DP_OUTPUT_ENABLE (1<<31) | 4099 | #define TRANS_DP_OUTPUT_ENABLE (1<<31) |
4055 | #define TRANS_DP_PORT_SEL_B (0<<29) | 4100 | #define TRANS_DP_PORT_SEL_B (0<<29) |
4056 | #define TRANS_DP_PORT_SEL_C (1<<29) | 4101 | #define TRANS_DP_PORT_SEL_C (1<<29) |
@@ -4108,6 +4153,8 @@ | |||
4108 | #define FORCEWAKE_ACK_HSW 0x130044 | 4153 | #define FORCEWAKE_ACK_HSW 0x130044 |
4109 | #define FORCEWAKE_ACK 0x130090 | 4154 | #define FORCEWAKE_ACK 0x130090 |
4110 | #define FORCEWAKE_MT 0xa188 /* multi-threaded */ | 4155 | #define FORCEWAKE_MT 0xa188 /* multi-threaded */ |
4156 | #define FORCEWAKE_KERNEL 0x1 | ||
4157 | #define FORCEWAKE_USER 0x2 | ||
4111 | #define FORCEWAKE_MT_ACK 0x130040 | 4158 | #define FORCEWAKE_MT_ACK 0x130040 |
4112 | #define ECOBUS 0xa180 | 4159 | #define ECOBUS 0xa180 |
4113 | #define FORCEWAKE_MT_ENABLE (1<<5) | 4160 | #define FORCEWAKE_MT_ENABLE (1<<5) |
@@ -4220,6 +4267,10 @@ | |||
4220 | #define GEN6_READ_OC_PARAMS 0xc | 4267 | #define GEN6_READ_OC_PARAMS 0xc |
4221 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 | 4268 | #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 |
4222 | #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 | 4269 | #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 |
4270 | #define GEN6_PCODE_WRITE_RC6VIDS 0x4 | ||
4271 | #define GEN6_PCODE_READ_RC6VIDS 0x5 | ||
4272 | #define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0 | ||
4273 | #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0) | ||
4223 | #define GEN6_PCODE_DATA 0x138128 | 4274 | #define GEN6_PCODE_DATA 0x138128 |
4224 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 | 4275 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 |
4225 | 4276 | ||
@@ -4251,6 +4302,15 @@ | |||
4251 | #define GEN7_L3LOG_BASE 0xB070 | 4302 | #define GEN7_L3LOG_BASE 0xB070 |
4252 | #define GEN7_L3LOG_SIZE 0x80 | 4303 | #define GEN7_L3LOG_SIZE 0x80 |
4253 | 4304 | ||
4305 | #define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ | ||
4306 | #define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 | ||
4307 | #define GEN7_MAX_PS_THREAD_DEP (8<<12) | ||
4308 | #define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) | ||
4309 | |||
4310 | #define GEN7_ROW_CHICKEN2 0xe4f4 | ||
4311 | #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 | ||
4312 | #define DOP_CLOCK_GATING_DISABLE (1<<0) | ||
4313 | |||
4254 | #define G4X_AUD_VID_DID 0x62020 | 4314 | #define G4X_AUD_VID_DID 0x62020 |
4255 | #define INTEL_AUDIO_DEVCL 0x808629FB | 4315 | #define INTEL_AUDIO_DEVCL 0x808629FB |
4256 | #define INTEL_AUDIO_DEVBLC 0x80862801 | 4316 | #define INTEL_AUDIO_DEVBLC 0x80862801 |
@@ -4380,33 +4440,39 @@ | |||
4380 | #define HSW_PWR_WELL_CTL6 0x45414 | 4440 | #define HSW_PWR_WELL_CTL6 0x45414 |
4381 | 4441 | ||
4382 | /* Per-pipe DDI Function Control */ | 4442 | /* Per-pipe DDI Function Control */ |
4383 | #define PIPE_DDI_FUNC_CTL_A 0x60400 | 4443 | #define TRANS_DDI_FUNC_CTL_A 0x60400 |
4384 | #define PIPE_DDI_FUNC_CTL_B 0x61400 | 4444 | #define TRANS_DDI_FUNC_CTL_B 0x61400 |
4385 | #define PIPE_DDI_FUNC_CTL_C 0x62400 | 4445 | #define TRANS_DDI_FUNC_CTL_C 0x62400 |
4386 | #define PIPE_DDI_FUNC_CTL_EDP 0x6F400 | 4446 | #define TRANS_DDI_FUNC_CTL_EDP 0x6F400 |
4387 | #define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ | 4447 | #define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ |
4388 | PIPE_DDI_FUNC_CTL_B) | 4448 | TRANS_DDI_FUNC_CTL_B) |
4389 | #define PIPE_DDI_FUNC_ENABLE (1<<31) | 4449 | #define TRANS_DDI_FUNC_ENABLE (1<<31) |
4390 | /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ | 4450 | /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ |
4391 | #define PIPE_DDI_PORT_MASK (7<<28) | 4451 | #define TRANS_DDI_PORT_MASK (7<<28) |
4392 | #define PIPE_DDI_SELECT_PORT(x) ((x)<<28) | 4452 | #define TRANS_DDI_SELECT_PORT(x) ((x)<<28) |
4393 | #define PIPE_DDI_MODE_SELECT_MASK (7<<24) | 4453 | #define TRANS_DDI_PORT_NONE (0<<28) |
4394 | #define PIPE_DDI_MODE_SELECT_HDMI (0<<24) | 4454 | #define TRANS_DDI_MODE_SELECT_MASK (7<<24) |
4395 | #define PIPE_DDI_MODE_SELECT_DVI (1<<24) | 4455 | #define TRANS_DDI_MODE_SELECT_HDMI (0<<24) |
4396 | #define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) | 4456 | #define TRANS_DDI_MODE_SELECT_DVI (1<<24) |
4397 | #define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) | 4457 | #define TRANS_DDI_MODE_SELECT_DP_SST (2<<24) |
4398 | #define PIPE_DDI_MODE_SELECT_FDI (4<<24) | 4458 | #define TRANS_DDI_MODE_SELECT_DP_MST (3<<24) |
4399 | #define PIPE_DDI_BPC_MASK (7<<20) | 4459 | #define TRANS_DDI_MODE_SELECT_FDI (4<<24) |
4400 | #define PIPE_DDI_BPC_8 (0<<20) | 4460 | #define TRANS_DDI_BPC_MASK (7<<20) |
4401 | #define PIPE_DDI_BPC_10 (1<<20) | 4461 | #define TRANS_DDI_BPC_8 (0<<20) |
4402 | #define PIPE_DDI_BPC_6 (2<<20) | 4462 | #define TRANS_DDI_BPC_10 (1<<20) |
4403 | #define PIPE_DDI_BPC_12 (3<<20) | 4463 | #define TRANS_DDI_BPC_6 (2<<20) |
4404 | #define PIPE_DDI_PVSYNC (1<<17) | 4464 | #define TRANS_DDI_BPC_12 (3<<20) |
4405 | #define PIPE_DDI_PHSYNC (1<<16) | 4465 | #define TRANS_DDI_PVSYNC (1<<17) |
4406 | #define PIPE_DDI_BFI_ENABLE (1<<4) | 4466 | #define TRANS_DDI_PHSYNC (1<<16) |
4407 | #define PIPE_DDI_PORT_WIDTH_X1 (0<<1) | 4467 | #define TRANS_DDI_EDP_INPUT_MASK (7<<12) |
4408 | #define PIPE_DDI_PORT_WIDTH_X2 (1<<1) | 4468 | #define TRANS_DDI_EDP_INPUT_A_ON (0<<12) |
4409 | #define PIPE_DDI_PORT_WIDTH_X4 (3<<1) | 4469 | #define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) |
4470 | #define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) | ||
4471 | #define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) | ||
4472 | #define TRANS_DDI_BFI_ENABLE (1<<4) | ||
4473 | #define TRANS_DDI_PORT_WIDTH_X1 (0<<1) | ||
4474 | #define TRANS_DDI_PORT_WIDTH_X2 (1<<1) | ||
4475 | #define TRANS_DDI_PORT_WIDTH_X4 (3<<1) | ||
4410 | 4476 | ||
4411 | /* DisplayPort Transport Control */ | 4477 | /* DisplayPort Transport Control */ |
4412 | #define DP_TP_CTL_A 0x64040 | 4478 | #define DP_TP_CTL_A 0x64040 |
@@ -4420,12 +4486,16 @@ | |||
4420 | #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) | 4486 | #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) |
4421 | #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) | 4487 | #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) |
4422 | #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) | 4488 | #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) |
4489 | #define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8) | ||
4490 | #define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8) | ||
4423 | #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) | 4491 | #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) |
4492 | #define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) | ||
4424 | 4493 | ||
4425 | /* DisplayPort Transport Status */ | 4494 | /* DisplayPort Transport Status */ |
4426 | #define DP_TP_STATUS_A 0x64044 | 4495 | #define DP_TP_STATUS_A 0x64044 |
4427 | #define DP_TP_STATUS_B 0x64144 | 4496 | #define DP_TP_STATUS_B 0x64144 |
4428 | #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) | 4497 | #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) |
4498 | #define DP_TP_STATUS_IDLE_DONE (1<<25) | ||
4429 | #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) | 4499 | #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) |
4430 | 4500 | ||
4431 | /* DDI Buffer Control */ | 4501 | /* DDI Buffer Control */ |
@@ -4490,8 +4560,8 @@ | |||
4490 | /* SPLL */ | 4560 | /* SPLL */ |
4491 | #define SPLL_CTL 0x46020 | 4561 | #define SPLL_CTL 0x46020 |
4492 | #define SPLL_PLL_ENABLE (1<<31) | 4562 | #define SPLL_PLL_ENABLE (1<<31) |
4493 | #define SPLL_PLL_SCC (1<<28) | 4563 | #define SPLL_PLL_SSC (1<<28) |
4494 | #define SPLL_PLL_NON_SCC (2<<28) | 4564 | #define SPLL_PLL_NON_SSC (2<<28) |
4495 | #define SPLL_PLL_FREQ_810MHz (0<<26) | 4565 | #define SPLL_PLL_FREQ_810MHz (0<<26) |
4496 | #define SPLL_PLL_FREQ_1350MHz (1<<26) | 4566 | #define SPLL_PLL_FREQ_1350MHz (1<<26) |
4497 | 4567 | ||
@@ -4500,7 +4570,7 @@ | |||
4500 | #define WRPLL_CTL2 0x46060 | 4570 | #define WRPLL_CTL2 0x46060 |
4501 | #define WRPLL_PLL_ENABLE (1<<31) | 4571 | #define WRPLL_PLL_ENABLE (1<<31) |
4502 | #define WRPLL_PLL_SELECT_SSC (0x01<<28) | 4572 | #define WRPLL_PLL_SELECT_SSC (0x01<<28) |
4503 | #define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) | 4573 | #define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) |
4504 | #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) | 4574 | #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) |
4505 | /* WRPLL divider programming */ | 4575 | /* WRPLL divider programming */ |
4506 | #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) | 4576 | #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) |
@@ -4517,21 +4587,36 @@ | |||
4517 | #define PORT_CLK_SEL_SPLL (3<<29) | 4587 | #define PORT_CLK_SEL_SPLL (3<<29) |
4518 | #define PORT_CLK_SEL_WRPLL1 (4<<29) | 4588 | #define PORT_CLK_SEL_WRPLL1 (4<<29) |
4519 | #define PORT_CLK_SEL_WRPLL2 (5<<29) | 4589 | #define PORT_CLK_SEL_WRPLL2 (5<<29) |
4520 | 4590 | #define PORT_CLK_SEL_NONE (7<<29) | |
4521 | /* Pipe clock selection */ | 4591 | |
4522 | #define PIPE_CLK_SEL_A 0x46140 | 4592 | /* Transcoder clock selection */ |
4523 | #define PIPE_CLK_SEL_B 0x46144 | 4593 | #define TRANS_CLK_SEL_A 0x46140 |
4524 | #define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) | 4594 | #define TRANS_CLK_SEL_B 0x46144 |
4525 | /* For each pipe, we need to select the corresponding port clock */ | 4595 | #define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) |
4526 | #define PIPE_CLK_SEL_DISABLED (0x0<<29) | 4596 | /* For each transcoder, we need to select the corresponding port clock */ |
4527 | #define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) | 4597 | #define TRANS_CLK_SEL_DISABLED (0x0<<29) |
4598 | #define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) | ||
4599 | |||
4600 | #define _TRANSA_MSA_MISC 0x60410 | ||
4601 | #define _TRANSB_MSA_MISC 0x61410 | ||
4602 | #define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ | ||
4603 | _TRANSB_MSA_MISC) | ||
4604 | #define TRANS_MSA_SYNC_CLK (1<<0) | ||
4605 | #define TRANS_MSA_6_BPC (0<<5) | ||
4606 | #define TRANS_MSA_8_BPC (1<<5) | ||
4607 | #define TRANS_MSA_10_BPC (2<<5) | ||
4608 | #define TRANS_MSA_12_BPC (3<<5) | ||
4609 | #define TRANS_MSA_16_BPC (4<<5) | ||
4528 | 4610 | ||
4529 | /* LCPLL Control */ | 4611 | /* LCPLL Control */ |
4530 | #define LCPLL_CTL 0x130040 | 4612 | #define LCPLL_CTL 0x130040 |
4531 | #define LCPLL_PLL_DISABLE (1<<31) | 4613 | #define LCPLL_PLL_DISABLE (1<<31) |
4532 | #define LCPLL_PLL_LOCK (1<<30) | 4614 | #define LCPLL_PLL_LOCK (1<<30) |
4615 | #define LCPLL_CLK_FREQ_MASK (3<<26) | ||
4616 | #define LCPLL_CLK_FREQ_450 (0<<26) | ||
4533 | #define LCPLL_CD_CLOCK_DISABLE (1<<25) | 4617 | #define LCPLL_CD_CLOCK_DISABLE (1<<25) |
4534 | #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) | 4618 | #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) |
4619 | #define LCPLL_CD_SOURCE_FCLK (1<<21) | ||
4535 | 4620 | ||
4536 | /* Pipe WM_LINETIME - watermark line time */ | 4621 | /* Pipe WM_LINETIME - watermark line time */ |
4537 | #define PIPE_WM_LINETIME_A 0x45270 | 4622 | #define PIPE_WM_LINETIME_A 0x45270 |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 5854bddb1e9f..a818eba7cb66 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | |||
60 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; | 60 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; |
61 | 61 | ||
62 | if (pipe == PIPE_A) | 62 | if (pipe == PIPE_A) |
63 | array = dev_priv->save_palette_a; | 63 | array = dev_priv->regfile.save_palette_a; |
64 | else | 64 | else |
65 | array = dev_priv->save_palette_b; | 65 | array = dev_priv->regfile.save_palette_b; |
66 | 66 | ||
67 | for (i = 0; i < 256; i++) | 67 | for (i = 0; i < 256; i++) |
68 | array[i] = I915_READ(reg + (i << 2)); | 68 | array[i] = I915_READ(reg + (i << 2)); |
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | |||
82 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; | 82 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; |
83 | 83 | ||
84 | if (pipe == PIPE_A) | 84 | if (pipe == PIPE_A) |
85 | array = dev_priv->save_palette_a; | 85 | array = dev_priv->regfile.save_palette_a; |
86 | else | 86 | else |
87 | array = dev_priv->save_palette_b; | 87 | array = dev_priv->regfile.save_palette_b; |
88 | 88 | ||
89 | for (i = 0; i < 256; i++) | 89 | for (i = 0; i < 256; i++) |
90 | I915_WRITE(reg + (i << 2), array[i]); | 90 | I915_WRITE(reg + (i << 2), array[i]); |
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev) | |||
131 | u16 cr_index, cr_data, st01; | 131 | u16 cr_index, cr_data, st01; |
132 | 132 | ||
133 | /* VGA color palette registers */ | 133 | /* VGA color palette registers */ |
134 | dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); | 134 | dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); |
135 | 135 | ||
136 | /* MSR bits */ | 136 | /* MSR bits */ |
137 | dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); | 137 | dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ); |
138 | if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { | 138 | if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { |
139 | cr_index = VGA_CR_INDEX_CGA; | 139 | cr_index = VGA_CR_INDEX_CGA; |
140 | cr_data = VGA_CR_DATA_CGA; | 140 | cr_data = VGA_CR_DATA_CGA; |
141 | st01 = VGA_ST01_CGA; | 141 | st01 = VGA_ST01_CGA; |
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev) | |||
150 | i915_read_indexed(dev, cr_index, cr_data, 0x11) & | 150 | i915_read_indexed(dev, cr_index, cr_data, 0x11) & |
151 | (~0x80)); | 151 | (~0x80)); |
152 | for (i = 0; i <= 0x24; i++) | 152 | for (i = 0; i <= 0x24; i++) |
153 | dev_priv->saveCR[i] = | 153 | dev_priv->regfile.saveCR[i] = |
154 | i915_read_indexed(dev, cr_index, cr_data, i); | 154 | i915_read_indexed(dev, cr_index, cr_data, i); |
155 | /* Make sure we don't turn off CR group 0 writes */ | 155 | /* Make sure we don't turn off CR group 0 writes */ |
156 | dev_priv->saveCR[0x11] &= ~0x80; | 156 | dev_priv->regfile.saveCR[0x11] &= ~0x80; |
157 | 157 | ||
158 | /* Attribute controller registers */ | 158 | /* Attribute controller registers */ |
159 | I915_READ8(st01); | 159 | I915_READ8(st01); |
160 | dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); | 160 | dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX); |
161 | for (i = 0; i <= 0x14; i++) | 161 | for (i = 0; i <= 0x14; i++) |
162 | dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); | 162 | dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0); |
163 | I915_READ8(st01); | 163 | I915_READ8(st01); |
164 | I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); | 164 | I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX); |
165 | I915_READ8(st01); | 165 | I915_READ8(st01); |
166 | 166 | ||
167 | /* Graphics controller registers */ | 167 | /* Graphics controller registers */ |
168 | for (i = 0; i < 9; i++) | 168 | for (i = 0; i < 9; i++) |
169 | dev_priv->saveGR[i] = | 169 | dev_priv->regfile.saveGR[i] = |
170 | i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); | 170 | i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); |
171 | 171 | ||
172 | dev_priv->saveGR[0x10] = | 172 | dev_priv->regfile.saveGR[0x10] = |
173 | i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); | 173 | i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); |
174 | dev_priv->saveGR[0x11] = | 174 | dev_priv->regfile.saveGR[0x11] = |
175 | i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); | 175 | i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); |
176 | dev_priv->saveGR[0x18] = | 176 | dev_priv->regfile.saveGR[0x18] = |
177 | i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); | 177 | i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); |
178 | 178 | ||
179 | /* Sequencer registers */ | 179 | /* Sequencer registers */ |
180 | for (i = 0; i < 8; i++) | 180 | for (i = 0; i < 8; i++) |
181 | dev_priv->saveSR[i] = | 181 | dev_priv->regfile.saveSR[i] = |
182 | i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); | 182 | i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); |
183 | } | 183 | } |
184 | 184 | ||
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev) | |||
189 | u16 cr_index, cr_data, st01; | 189 | u16 cr_index, cr_data, st01; |
190 | 190 | ||
191 | /* MSR bits */ | 191 | /* MSR bits */ |
192 | I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); | 192 | I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); |
193 | if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { | 193 | if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { |
194 | cr_index = VGA_CR_INDEX_CGA; | 194 | cr_index = VGA_CR_INDEX_CGA; |
195 | cr_data = VGA_CR_DATA_CGA; | 195 | cr_data = VGA_CR_DATA_CGA; |
196 | st01 = VGA_ST01_CGA; | 196 | st01 = VGA_ST01_CGA; |
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev) | |||
203 | /* Sequencer registers, don't write SR07 */ | 203 | /* Sequencer registers, don't write SR07 */ |
204 | for (i = 0; i < 7; i++) | 204 | for (i = 0; i < 7; i++) |
205 | i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, | 205 | i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, |
206 | dev_priv->saveSR[i]); | 206 | dev_priv->regfile.saveSR[i]); |
207 | 207 | ||
208 | /* CRT controller regs */ | 208 | /* CRT controller regs */ |
209 | /* Enable CR group 0 writes */ | 209 | /* Enable CR group 0 writes */ |
210 | i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); | 210 | i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]); |
211 | for (i = 0; i <= 0x24; i++) | 211 | for (i = 0; i <= 0x24; i++) |
212 | i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); | 212 | i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]); |
213 | 213 | ||
214 | /* Graphics controller regs */ | 214 | /* Graphics controller regs */ |
215 | for (i = 0; i < 9; i++) | 215 | for (i = 0; i < 9; i++) |
216 | i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, | 216 | i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, |
217 | dev_priv->saveGR[i]); | 217 | dev_priv->regfile.saveGR[i]); |
218 | 218 | ||
219 | i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, | 219 | i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, |
220 | dev_priv->saveGR[0x10]); | 220 | dev_priv->regfile.saveGR[0x10]); |
221 | i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, | 221 | i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, |
222 | dev_priv->saveGR[0x11]); | 222 | dev_priv->regfile.saveGR[0x11]); |
223 | i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, | 223 | i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, |
224 | dev_priv->saveGR[0x18]); | 224 | dev_priv->regfile.saveGR[0x18]); |
225 | 225 | ||
226 | /* Attribute controller registers */ | 226 | /* Attribute controller registers */ |
227 | I915_READ8(st01); /* switch back to index mode */ | 227 | I915_READ8(st01); /* switch back to index mode */ |
228 | for (i = 0; i <= 0x14; i++) | 228 | for (i = 0; i <= 0x14; i++) |
229 | i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); | 229 | i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0); |
230 | I915_READ8(st01); /* switch back to index mode */ | 230 | I915_READ8(st01); /* switch back to index mode */ |
231 | I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); | 231 | I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20); |
232 | I915_READ8(st01); | 232 | I915_READ8(st01); |
233 | 233 | ||
234 | /* VGA color palette registers */ | 234 | /* VGA color palette registers */ |
235 | I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); | 235 | I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); |
236 | } | 236 | } |
237 | 237 | ||
238 | static void i915_save_modeset_reg(struct drm_device *dev) | 238 | static void i915_save_modeset_reg(struct drm_device *dev) |
@@ -244,156 +244,162 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
244 | return; | 244 | return; |
245 | 245 | ||
246 | /* Cursor state */ | 246 | /* Cursor state */ |
247 | dev_priv->saveCURACNTR = I915_READ(_CURACNTR); | 247 | dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); |
248 | dev_priv->saveCURAPOS = I915_READ(_CURAPOS); | 248 | dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); |
249 | dev_priv->saveCURABASE = I915_READ(_CURABASE); | 249 | dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); |
250 | dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); | 250 | dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); |
251 | dev_priv->saveCURBPOS = I915_READ(_CURBPOS); | 251 | dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); |
252 | dev_priv->saveCURBBASE = I915_READ(_CURBBASE); | 252 | dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); |
253 | if (IS_GEN2(dev)) | 253 | if (IS_GEN2(dev)) |
254 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | 254 | dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); |
255 | 255 | ||
256 | if (HAS_PCH_SPLIT(dev)) { | 256 | if (HAS_PCH_SPLIT(dev)) { |
257 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); | 257 | dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); |
258 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); | 258 | dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); |
259 | } | 259 | } |
260 | 260 | ||
261 | /* Pipe & plane A info */ | 261 | /* Pipe & plane A info */ |
262 | dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); | 262 | dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); |
263 | dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); | 263 | dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); |
264 | if (HAS_PCH_SPLIT(dev)) { | 264 | if (HAS_PCH_SPLIT(dev)) { |
265 | dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); | 265 | dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); |
266 | dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); | 266 | dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); |
267 | dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); | 267 | dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); |
268 | } else { | 268 | } else { |
269 | dev_priv->saveFPA0 = I915_READ(_FPA0); | 269 | dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); |
270 | dev_priv->saveFPA1 = I915_READ(_FPA1); | 270 | dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); |
271 | dev_priv->saveDPLL_A = I915_READ(_DPLL_A); | 271 | dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); |
272 | } | 272 | } |
273 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | 273 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
274 | dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); | 274 | dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); |
275 | dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); | 275 | dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); |
276 | dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); | 276 | dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); |
277 | dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); | 277 | dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); |
278 | dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); | 278 | dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); |
279 | dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); | 279 | dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); |
280 | dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); | 280 | dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); |
281 | if (!HAS_PCH_SPLIT(dev)) | 281 | if (!HAS_PCH_SPLIT(dev)) |
282 | dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); | 282 | dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); |
283 | 283 | ||
284 | if (HAS_PCH_SPLIT(dev)) { | 284 | if (HAS_PCH_SPLIT(dev)) { |
285 | dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); | 285 | dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); |
286 | dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); | 286 | dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); |
287 | dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); | 287 | dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); |
288 | dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); | 288 | dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); |
289 | 289 | ||
290 | dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); | 290 | dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); |
291 | dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); | 291 | dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); |
292 | 292 | ||
293 | dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); | 293 | dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); |
294 | dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); | 294 | dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); |
295 | dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); | 295 | dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); |
296 | 296 | ||
297 | dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); | 297 | dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); |
298 | dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); | 298 | dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); |
299 | dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); | 299 | dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); |
300 | dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); | 300 | dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); |
301 | dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); | 301 | dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); |
302 | dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); | 302 | dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); |
303 | dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); | 303 | dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); |
304 | } | 304 | } |
305 | 305 | ||
306 | dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); | 306 | dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); |
307 | dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); | 307 | dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); |
308 | dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); | 308 | dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); |
309 | dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); | 309 | dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); |
310 | dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); | 310 | dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); |
311 | if (INTEL_INFO(dev)->gen >= 4) { | 311 | if (INTEL_INFO(dev)->gen >= 4) { |
312 | dev_priv->saveDSPASURF = I915_READ(_DSPASURF); | 312 | dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); |
313 | dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); | 313 | dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); |
314 | } | 314 | } |
315 | i915_save_palette(dev, PIPE_A); | 315 | i915_save_palette(dev, PIPE_A); |
316 | dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); | 316 | dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); |
317 | 317 | ||
318 | /* Pipe & plane B info */ | 318 | /* Pipe & plane B info */ |
319 | dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); | 319 | dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); |
320 | dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); | 320 | dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); |
321 | if (HAS_PCH_SPLIT(dev)) { | 321 | if (HAS_PCH_SPLIT(dev)) { |
322 | dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); | 322 | dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); |
323 | dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); | 323 | dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); |
324 | dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); | 324 | dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); |
325 | } else { | 325 | } else { |
326 | dev_priv->saveFPB0 = I915_READ(_FPB0); | 326 | dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); |
327 | dev_priv->saveFPB1 = I915_READ(_FPB1); | 327 | dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); |
328 | dev_priv->saveDPLL_B = I915_READ(_DPLL_B); | 328 | dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); |
329 | } | 329 | } |
330 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | 330 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
331 | dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); | 331 | dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); |
332 | dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); | 332 | dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); |
333 | dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); | 333 | dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); |
334 | dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); | 334 | dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); |
335 | dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); | 335 | dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); |
336 | dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); | 336 | dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); |
337 | dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); | 337 | dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); |
338 | if (!HAS_PCH_SPLIT(dev)) | 338 | if (!HAS_PCH_SPLIT(dev)) |
339 | dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); | 339 | dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); |
340 | 340 | ||
341 | if (HAS_PCH_SPLIT(dev)) { | 341 | if (HAS_PCH_SPLIT(dev)) { |
342 | dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); | 342 | dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); |
343 | dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); | 343 | dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); |
344 | dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); | 344 | dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); |
345 | dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); | 345 | dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); |
346 | 346 | ||
347 | dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); | 347 | dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); |
348 | dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); | 348 | dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); |
349 | 349 | ||
350 | dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); | 350 | dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); |
351 | dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); | 351 | dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); |
352 | dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); | 352 | dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); |
353 | 353 | ||
354 | dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); | 354 | dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); |
355 | dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); | 355 | dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); |
356 | dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); | 356 | dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); |
357 | dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); | 357 | dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); |
358 | dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); | 358 | dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); |
359 | dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); | 359 | dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); |
360 | dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); | 360 | dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); |
361 | } | 361 | } |
362 | 362 | ||
363 | dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); | 363 | dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); |
364 | dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); | 364 | dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); |
365 | dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); | 365 | dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); |
366 | dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); | 366 | dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); |
367 | dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); | 367 | dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); |
368 | if (INTEL_INFO(dev)->gen >= 4) { | 368 | if (INTEL_INFO(dev)->gen >= 4) { |
369 | dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); | 369 | dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); |
370 | dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); | 370 | dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); |
371 | } | 371 | } |
372 | i915_save_palette(dev, PIPE_B); | 372 | i915_save_palette(dev, PIPE_B); |
373 | dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); | 373 | dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); |
374 | 374 | ||
375 | /* Fences */ | 375 | /* Fences */ |
376 | switch (INTEL_INFO(dev)->gen) { | 376 | switch (INTEL_INFO(dev)->gen) { |
377 | case 7: | 377 | case 7: |
378 | case 6: | 378 | case 6: |
379 | for (i = 0; i < 16; i++) | 379 | for (i = 0; i < 16; i++) |
380 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | 380 | dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
381 | break; | 381 | break; |
382 | case 5: | 382 | case 5: |
383 | case 4: | 383 | case 4: |
384 | for (i = 0; i < 16; i++) | 384 | for (i = 0; i < 16; i++) |
385 | dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | 385 | dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); |
386 | break; | 386 | break; |
387 | case 3: | 387 | case 3: |
388 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 388 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
389 | for (i = 0; i < 8; i++) | 389 | for (i = 0; i < 8; i++) |
390 | dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | 390 | dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); |
391 | case 2: | 391 | case 2: |
392 | for (i = 0; i < 8; i++) | 392 | for (i = 0; i < 8; i++) |
393 | dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | 393 | dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); |
394 | break; | 394 | break; |
395 | } | 395 | } |
396 | 396 | ||
397 | /* CRT state */ | ||
398 | if (HAS_PCH_SPLIT(dev)) | ||
399 | dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); | ||
400 | else | ||
401 | dev_priv->regfile.saveADPA = I915_READ(ADPA); | ||
402 | |||
397 | return; | 403 | return; |
398 | } | 404 | } |
399 | 405 | ||
@@ -412,20 +418,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
412 | case 7: | 418 | case 7: |
413 | case 6: | 419 | case 6: |
414 | for (i = 0; i < 16; i++) | 420 | for (i = 0; i < 16; i++) |
415 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); | 421 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); |
416 | break; | 422 | break; |
417 | case 5: | 423 | case 5: |
418 | case 4: | 424 | case 4: |
419 | for (i = 0; i < 16; i++) | 425 | for (i = 0; i < 16; i++) |
420 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); | 426 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); |
421 | break; | 427 | break; |
422 | case 3: | 428 | case 3: |
423 | case 2: | 429 | case 2: |
424 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 430 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
425 | for (i = 0; i < 8; i++) | 431 | for (i = 0; i < 8; i++) |
426 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); | 432 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); |
427 | for (i = 0; i < 8; i++) | 433 | for (i = 0; i < 8; i++) |
428 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); | 434 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); |
429 | break; | 435 | break; |
430 | } | 436 | } |
431 | 437 | ||
@@ -447,158 +453,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
447 | } | 453 | } |
448 | 454 | ||
449 | if (HAS_PCH_SPLIT(dev)) { | 455 | if (HAS_PCH_SPLIT(dev)) { |
450 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); | 456 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); |
451 | I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); | 457 | I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL); |
452 | } | 458 | } |
453 | 459 | ||
454 | /* Pipe & plane A info */ | 460 | /* Pipe & plane A info */ |
455 | /* Prime the clock */ | 461 | /* Prime the clock */ |
456 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { | 462 | if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { |
457 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & | 463 | I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & |
458 | ~DPLL_VCO_ENABLE); | 464 | ~DPLL_VCO_ENABLE); |
459 | POSTING_READ(dpll_a_reg); | 465 | POSTING_READ(dpll_a_reg); |
460 | udelay(150); | 466 | udelay(150); |
461 | } | 467 | } |
462 | I915_WRITE(fpa0_reg, dev_priv->saveFPA0); | 468 | I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); |
463 | I915_WRITE(fpa1_reg, dev_priv->saveFPA1); | 469 | I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); |
464 | /* Actually enable it */ | 470 | /* Actually enable it */ |
465 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); | 471 | I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); |
466 | POSTING_READ(dpll_a_reg); | 472 | POSTING_READ(dpll_a_reg); |
467 | udelay(150); | 473 | udelay(150); |
468 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | 474 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { |
469 | I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); | 475 | I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); |
470 | POSTING_READ(_DPLL_A_MD); | 476 | POSTING_READ(_DPLL_A_MD); |
471 | } | 477 | } |
472 | udelay(150); | 478 | udelay(150); |
473 | 479 | ||
474 | /* Restore mode */ | 480 | /* Restore mode */ |
475 | I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); | 481 | I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); |
476 | I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); | 482 | I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); |
477 | I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); | 483 | I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); |
478 | I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); | 484 | I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); |
479 | I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); | 485 | I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); |
480 | I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); | 486 | I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); |
481 | if (!HAS_PCH_SPLIT(dev)) | 487 | if (!HAS_PCH_SPLIT(dev)) |
482 | I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); | 488 | I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); |
483 | 489 | ||
484 | if (HAS_PCH_SPLIT(dev)) { | 490 | if (HAS_PCH_SPLIT(dev)) { |
485 | I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); | 491 | I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); |
486 | I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); | 492 | I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); |
487 | I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); | 493 | I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); |
488 | I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); | 494 | I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); |
489 | 495 | ||
490 | I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); | 496 | I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); |
491 | I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); | 497 | I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); |
492 | 498 | ||
493 | I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); | 499 | I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); |
494 | I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); | 500 | I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); |
495 | I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); | 501 | I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); |
496 | 502 | ||
497 | I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); | 503 | I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); |
498 | I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); | 504 | I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); |
499 | I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); | 505 | I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); |
500 | I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); | 506 | I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); |
501 | I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); | 507 | I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); |
502 | I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); | 508 | I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); |
503 | I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); | 509 | I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); |
504 | } | 510 | } |
505 | 511 | ||
506 | /* Restore plane info */ | 512 | /* Restore plane info */ |
507 | I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); | 513 | I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); |
508 | I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); | 514 | I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); |
509 | I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); | 515 | I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); |
510 | I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); | 516 | I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); |
511 | I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); | 517 | I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); |
512 | if (INTEL_INFO(dev)->gen >= 4) { | 518 | if (INTEL_INFO(dev)->gen >= 4) { |
513 | I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); | 519 | I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); |
514 | I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); | 520 | I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); |
515 | } | 521 | } |
516 | 522 | ||
517 | I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); | 523 | I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); |
518 | 524 | ||
519 | i915_restore_palette(dev, PIPE_A); | 525 | i915_restore_palette(dev, PIPE_A); |
520 | /* Enable the plane */ | 526 | /* Enable the plane */ |
521 | I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); | 527 | I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); |
522 | I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); | 528 | I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); |
523 | 529 | ||
524 | /* Pipe & plane B info */ | 530 | /* Pipe & plane B info */ |
525 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { | 531 | if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { |
526 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & | 532 | I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & |
527 | ~DPLL_VCO_ENABLE); | 533 | ~DPLL_VCO_ENABLE); |
528 | POSTING_READ(dpll_b_reg); | 534 | POSTING_READ(dpll_b_reg); |
529 | udelay(150); | 535 | udelay(150); |
530 | } | 536 | } |
531 | I915_WRITE(fpb0_reg, dev_priv->saveFPB0); | 537 | I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); |
532 | I915_WRITE(fpb1_reg, dev_priv->saveFPB1); | 538 | I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); |
533 | /* Actually enable it */ | 539 | /* Actually enable it */ |
534 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); | 540 | I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); |
535 | POSTING_READ(dpll_b_reg); | 541 | POSTING_READ(dpll_b_reg); |
536 | udelay(150); | 542 | udelay(150); |
537 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | 543 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { |
538 | I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); | 544 | I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); |
539 | POSTING_READ(_DPLL_B_MD); | 545 | POSTING_READ(_DPLL_B_MD); |
540 | } | 546 | } |
541 | udelay(150); | 547 | udelay(150); |
542 | 548 | ||
543 | /* Restore mode */ | 549 | /* Restore mode */ |
544 | I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); | 550 | I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); |
545 | I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); | 551 | I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); |
546 | I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); | 552 | I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); |
547 | I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); | 553 | I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); |
548 | I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); | 554 | I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); |
549 | I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); | 555 | I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); |
550 | if (!HAS_PCH_SPLIT(dev)) | 556 | if (!HAS_PCH_SPLIT(dev)) |
551 | I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); | 557 | I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); |
552 | 558 | ||
553 | if (HAS_PCH_SPLIT(dev)) { | 559 | if (HAS_PCH_SPLIT(dev)) { |
554 | I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); | 560 | I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); |
555 | I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); | 561 | I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); |
556 | I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); | 562 | I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); |
557 | I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); | 563 | I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); |
558 | 564 | ||
559 | I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); | 565 | I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); |
560 | I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); | 566 | I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); |
561 | 567 | ||
562 | I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); | 568 | I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); |
563 | I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); | 569 | I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); |
564 | I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); | 570 | I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); |
565 | 571 | ||
566 | I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); | 572 | I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); |
567 | I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); | 573 | I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); |
568 | I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); | 574 | I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); |
569 | I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); | 575 | I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); |
570 | I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); | 576 | I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); |
571 | I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); | 577 | I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); |
572 | I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); | 578 | I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); |
573 | } | 579 | } |
574 | 580 | ||
575 | /* Restore plane info */ | 581 | /* Restore plane info */ |
576 | I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); | 582 | I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); |
577 | I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); | 583 | I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); |
578 | I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); | 584 | I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); |
579 | I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); | 585 | I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); |
580 | I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); | 586 | I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); |
581 | if (INTEL_INFO(dev)->gen >= 4) { | 587 | if (INTEL_INFO(dev)->gen >= 4) { |
582 | I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); | 588 | I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); |
583 | I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); | 589 | I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); |
584 | } | 590 | } |
585 | 591 | ||
586 | I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); | 592 | I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); |
587 | 593 | ||
588 | i915_restore_palette(dev, PIPE_B); | 594 | i915_restore_palette(dev, PIPE_B); |
589 | /* Enable the plane */ | 595 | /* Enable the plane */ |
590 | I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); | 596 | I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); |
591 | I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); | 597 | I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); |
592 | 598 | ||
593 | /* Cursor state */ | 599 | /* Cursor state */ |
594 | I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); | 600 | I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); |
595 | I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); | 601 | I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); |
596 | I915_WRITE(_CURABASE, dev_priv->saveCURABASE); | 602 | I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); |
597 | I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); | 603 | I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); |
598 | I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); | 604 | I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); |
599 | I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); | 605 | I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); |
600 | if (IS_GEN2(dev)) | 606 | if (IS_GEN2(dev)) |
601 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | 607 | I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); |
608 | |||
609 | /* CRT state */ | ||
610 | if (HAS_PCH_SPLIT(dev)) | ||
611 | I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA); | ||
612 | else | ||
613 | I915_WRITE(ADPA, dev_priv->regfile.saveADPA); | ||
602 | 614 | ||
603 | return; | 615 | return; |
604 | } | 616 | } |
@@ -608,89 +620,84 @@ static void i915_save_display(struct drm_device *dev) | |||
608 | struct drm_i915_private *dev_priv = dev->dev_private; | 620 | struct drm_i915_private *dev_priv = dev->dev_private; |
609 | 621 | ||
610 | /* Display arbitration control */ | 622 | /* Display arbitration control */ |
611 | dev_priv->saveDSPARB = I915_READ(DSPARB); | 623 | dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); |
612 | 624 | ||
613 | /* This is only meaningful in non-KMS mode */ | 625 | /* This is only meaningful in non-KMS mode */ |
614 | /* Don't save them in KMS mode */ | 626 | /* Don't regfile.save them in KMS mode */ |
615 | i915_save_modeset_reg(dev); | 627 | i915_save_modeset_reg(dev); |
616 | 628 | ||
617 | /* CRT state */ | ||
618 | if (HAS_PCH_SPLIT(dev)) { | ||
619 | dev_priv->saveADPA = I915_READ(PCH_ADPA); | ||
620 | } else { | ||
621 | dev_priv->saveADPA = I915_READ(ADPA); | ||
622 | } | ||
623 | |||
624 | /* LVDS state */ | 629 | /* LVDS state */ |
625 | if (HAS_PCH_SPLIT(dev)) { | 630 | if (HAS_PCH_SPLIT(dev)) { |
626 | dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); | 631 | dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); |
627 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); | 632 | dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); |
628 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); | 633 | dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); |
629 | dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); | 634 | dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); |
630 | dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); | 635 | dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); |
631 | dev_priv->saveLVDS = I915_READ(PCH_LVDS); | 636 | dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); |
632 | } else { | 637 | } else { |
633 | dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); | 638 | dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); |
634 | dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); | 639 | dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); |
635 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); | 640 | dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); |
636 | dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); | 641 | dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); |
637 | if (INTEL_INFO(dev)->gen >= 4) | 642 | if (INTEL_INFO(dev)->gen >= 4) |
638 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | 643 | dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); |
639 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 644 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
640 | dev_priv->saveLVDS = I915_READ(LVDS); | 645 | dev_priv->regfile.saveLVDS = I915_READ(LVDS); |
641 | } | 646 | } |
642 | 647 | ||
643 | if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) | 648 | if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) |
644 | dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); | 649 | dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); |
645 | 650 | ||
646 | if (HAS_PCH_SPLIT(dev)) { | 651 | if (HAS_PCH_SPLIT(dev)) { |
647 | dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); | 652 | dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); |
648 | dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); | 653 | dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); |
649 | dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); | 654 | dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); |
650 | } else { | 655 | } else { |
651 | dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); | 656 | dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); |
652 | dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); | 657 | dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); |
653 | dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); | 658 | dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); |
654 | } | 659 | } |
655 | 660 | ||
656 | /* Display Port state */ | 661 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
657 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 662 | /* Display Port state */ |
658 | dev_priv->saveDP_B = I915_READ(DP_B); | 663 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
659 | dev_priv->saveDP_C = I915_READ(DP_C); | 664 | dev_priv->regfile.saveDP_B = I915_READ(DP_B); |
660 | dev_priv->saveDP_D = I915_READ(DP_D); | 665 | dev_priv->regfile.saveDP_C = I915_READ(DP_C); |
661 | dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); | 666 | dev_priv->regfile.saveDP_D = I915_READ(DP_D); |
662 | dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); | 667 | dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); |
663 | dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); | 668 | dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); |
664 | dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); | 669 | dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); |
665 | dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); | 670 | dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); |
666 | dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); | 671 | dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); |
667 | dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); | 672 | dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); |
668 | dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); | 673 | dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); |
669 | } | 674 | dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); |
670 | /* FIXME: save TV & SDVO state */ | 675 | } |
671 | 676 | /* FIXME: regfile.save TV & SDVO state */ | |
672 | /* Only save FBC state on the platform that supports FBC */ | 677 | } |
678 | |||
679 | /* Only regfile.save FBC state on the platform that supports FBC */ | ||
673 | if (I915_HAS_FBC(dev)) { | 680 | if (I915_HAS_FBC(dev)) { |
674 | if (HAS_PCH_SPLIT(dev)) { | 681 | if (HAS_PCH_SPLIT(dev)) { |
675 | dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); | 682 | dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); |
676 | } else if (IS_GM45(dev)) { | 683 | } else if (IS_GM45(dev)) { |
677 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); | 684 | dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); |
678 | } else { | 685 | } else { |
679 | dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); | 686 | dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); |
680 | dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); | 687 | dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); |
681 | dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); | 688 | dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); |
682 | dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); | 689 | dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); |
683 | } | 690 | } |
684 | } | 691 | } |
685 | 692 | ||
686 | /* VGA state */ | 693 | /* VGA state */ |
687 | dev_priv->saveVGA0 = I915_READ(VGA0); | 694 | dev_priv->regfile.saveVGA0 = I915_READ(VGA0); |
688 | dev_priv->saveVGA1 = I915_READ(VGA1); | 695 | dev_priv->regfile.saveVGA1 = I915_READ(VGA1); |
689 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); | 696 | dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); |
690 | if (HAS_PCH_SPLIT(dev)) | 697 | if (HAS_PCH_SPLIT(dev)) |
691 | dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); | 698 | dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL); |
692 | else | 699 | else |
693 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); | 700 | dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL); |
694 | 701 | ||
695 | i915_save_vga(dev); | 702 | i915_save_vga(dev); |
696 | } | 703 | } |
@@ -700,97 +707,95 @@ static void i915_restore_display(struct drm_device *dev) | |||
700 | struct drm_i915_private *dev_priv = dev->dev_private; | 707 | struct drm_i915_private *dev_priv = dev->dev_private; |
701 | 708 | ||
702 | /* Display arbitration */ | 709 | /* Display arbitration */ |
703 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); | 710 | I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); |
704 | 711 | ||
705 | /* Display port ratios (must be done before clock is set) */ | 712 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
706 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 713 | /* Display port ratios (must be done before clock is set) */ |
707 | I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); | 714 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
708 | I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); | 715 | I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); |
709 | I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); | 716 | I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); |
710 | I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); | 717 | I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); |
711 | I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); | 718 | I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); |
712 | I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); | 719 | I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); |
713 | I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); | 720 | I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); |
714 | I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); | 721 | I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N); |
722 | I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); | ||
723 | } | ||
715 | } | 724 | } |
716 | 725 | ||
717 | /* This is only meaningful in non-KMS mode */ | 726 | /* This is only meaningful in non-KMS mode */ |
718 | /* Don't restore them in KMS mode */ | 727 | /* Don't restore them in KMS mode */ |
719 | i915_restore_modeset_reg(dev); | 728 | i915_restore_modeset_reg(dev); |
720 | 729 | ||
721 | /* CRT state */ | ||
722 | if (HAS_PCH_SPLIT(dev)) | ||
723 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); | ||
724 | else | ||
725 | I915_WRITE(ADPA, dev_priv->saveADPA); | ||
726 | |||
727 | /* LVDS state */ | 730 | /* LVDS state */ |
728 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | 731 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
729 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); | 732 | I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); |
730 | 733 | ||
731 | if (HAS_PCH_SPLIT(dev)) { | 734 | if (HAS_PCH_SPLIT(dev)) { |
732 | I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); | 735 | I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS); |
733 | } else if (IS_MOBILE(dev) && !IS_I830(dev)) | 736 | } else if (IS_MOBILE(dev) && !IS_I830(dev)) |
734 | I915_WRITE(LVDS, dev_priv->saveLVDS); | 737 | I915_WRITE(LVDS, dev_priv->regfile.saveLVDS); |
735 | 738 | ||
736 | if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) | 739 | if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) |
737 | I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); | 740 | I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); |
738 | 741 | ||
739 | if (HAS_PCH_SPLIT(dev)) { | 742 | if (HAS_PCH_SPLIT(dev)) { |
740 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); | 743 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); |
741 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); | 744 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); |
742 | /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; | 745 | /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; |
743 | * otherwise we get blank eDP screen after S3 on some machines | 746 | * otherwise we get blank eDP screen after S3 on some machines |
744 | */ | 747 | */ |
745 | I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); | 748 | I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2); |
746 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); | 749 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL); |
747 | I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); | 750 | I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); |
748 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | 751 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); |
749 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); | 752 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); |
750 | I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); | 753 | I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); |
751 | I915_WRITE(RSTDBYCTL, | 754 | I915_WRITE(RSTDBYCTL, |
752 | dev_priv->saveMCHBAR_RENDER_STANDBY); | 755 | dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); |
753 | } else { | 756 | } else { |
754 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); | 757 | I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); |
755 | I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); | 758 | I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); |
756 | I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); | 759 | I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); |
757 | I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); | 760 | I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); |
758 | I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); | 761 | I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); |
759 | I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); | 762 | I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); |
760 | I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); | 763 | I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); |
761 | } | 764 | } |
762 | 765 | ||
763 | /* Display Port state */ | 766 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
764 | if (SUPPORTS_INTEGRATED_DP(dev)) { | 767 | /* Display Port state */ |
765 | I915_WRITE(DP_B, dev_priv->saveDP_B); | 768 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
766 | I915_WRITE(DP_C, dev_priv->saveDP_C); | 769 | I915_WRITE(DP_B, dev_priv->regfile.saveDP_B); |
767 | I915_WRITE(DP_D, dev_priv->saveDP_D); | 770 | I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); |
771 | I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); | ||
772 | } | ||
773 | /* FIXME: restore TV & SDVO state */ | ||
768 | } | 774 | } |
769 | /* FIXME: restore TV & SDVO state */ | ||
770 | 775 | ||
771 | /* only restore FBC info on the platform that supports FBC*/ | 776 | /* only restore FBC info on the platform that supports FBC*/ |
772 | intel_disable_fbc(dev); | 777 | intel_disable_fbc(dev); |
773 | if (I915_HAS_FBC(dev)) { | 778 | if (I915_HAS_FBC(dev)) { |
774 | if (HAS_PCH_SPLIT(dev)) { | 779 | if (HAS_PCH_SPLIT(dev)) { |
775 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | 780 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); |
776 | } else if (IS_GM45(dev)) { | 781 | } else if (IS_GM45(dev)) { |
777 | I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | 782 | I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); |
778 | } else { | 783 | } else { |
779 | I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); | 784 | I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE); |
780 | I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); | 785 | I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE); |
781 | I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); | 786 | I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2); |
782 | I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); | 787 | I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); |
783 | } | 788 | } |
784 | } | 789 | } |
785 | /* VGA state */ | 790 | /* VGA state */ |
786 | if (HAS_PCH_SPLIT(dev)) | 791 | if (HAS_PCH_SPLIT(dev)) |
787 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); | 792 | I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL); |
788 | else | 793 | else |
789 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); | 794 | I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL); |
790 | 795 | ||
791 | I915_WRITE(VGA0, dev_priv->saveVGA0); | 796 | I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); |
792 | I915_WRITE(VGA1, dev_priv->saveVGA1); | 797 | I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); |
793 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); | 798 | I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); |
794 | POSTING_READ(VGA_PD); | 799 | POSTING_READ(VGA_PD); |
795 | udelay(150); | 800 | udelay(150); |
796 | 801 | ||
@@ -802,46 +807,49 @@ int i915_save_state(struct drm_device *dev) | |||
802 | struct drm_i915_private *dev_priv = dev->dev_private; | 807 | struct drm_i915_private *dev_priv = dev->dev_private; |
803 | int i; | 808 | int i; |
804 | 809 | ||
805 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 810 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); |
806 | 811 | ||
807 | mutex_lock(&dev->struct_mutex); | 812 | mutex_lock(&dev->struct_mutex); |
808 | 813 | ||
809 | /* Hardware status page */ | 814 | /* Hardware status page */ |
810 | dev_priv->saveHWS = I915_READ(HWS_PGA); | 815 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
816 | dev_priv->regfile.saveHWS = I915_READ(HWS_PGA); | ||
811 | 817 | ||
812 | i915_save_display(dev); | 818 | i915_save_display(dev); |
813 | 819 | ||
814 | /* Interrupt state */ | 820 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
815 | if (HAS_PCH_SPLIT(dev)) { | 821 | /* Interrupt state */ |
816 | dev_priv->saveDEIER = I915_READ(DEIER); | 822 | if (HAS_PCH_SPLIT(dev)) { |
817 | dev_priv->saveDEIMR = I915_READ(DEIMR); | 823 | dev_priv->regfile.saveDEIER = I915_READ(DEIER); |
818 | dev_priv->saveGTIER = I915_READ(GTIER); | 824 | dev_priv->regfile.saveDEIMR = I915_READ(DEIMR); |
819 | dev_priv->saveGTIMR = I915_READ(GTIMR); | 825 | dev_priv->regfile.saveGTIER = I915_READ(GTIER); |
820 | dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); | 826 | dev_priv->regfile.saveGTIMR = I915_READ(GTIMR); |
821 | dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); | 827 | dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); |
822 | dev_priv->saveMCHBAR_RENDER_STANDBY = | 828 | dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); |
823 | I915_READ(RSTDBYCTL); | 829 | dev_priv->regfile.saveMCHBAR_RENDER_STANDBY = |
824 | dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); | 830 | I915_READ(RSTDBYCTL); |
825 | } else { | 831 | dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); |
826 | dev_priv->saveIER = I915_READ(IER); | 832 | } else { |
827 | dev_priv->saveIMR = I915_READ(IMR); | 833 | dev_priv->regfile.saveIER = I915_READ(IER); |
834 | dev_priv->regfile.saveIMR = I915_READ(IMR); | ||
835 | } | ||
828 | } | 836 | } |
829 | 837 | ||
830 | intel_disable_gt_powersave(dev); | 838 | intel_disable_gt_powersave(dev); |
831 | 839 | ||
832 | /* Cache mode state */ | 840 | /* Cache mode state */ |
833 | dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | 841 | dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); |
834 | 842 | ||
835 | /* Memory Arbitration state */ | 843 | /* Memory Arbitration state */ |
836 | dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); | 844 | dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); |
837 | 845 | ||
838 | /* Scratch space */ | 846 | /* Scratch space */ |
839 | for (i = 0; i < 16; i++) { | 847 | for (i = 0; i < 16; i++) { |
840 | dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); | 848 | dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2)); |
841 | dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); | 849 | dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2)); |
842 | } | 850 | } |
843 | for (i = 0; i < 3; i++) | 851 | for (i = 0; i < 3; i++) |
844 | dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | 852 | dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2)); |
845 | 853 | ||
846 | mutex_unlock(&dev->struct_mutex); | 854 | mutex_unlock(&dev->struct_mutex); |
847 | 855 | ||
@@ -853,41 +861,44 @@ int i915_restore_state(struct drm_device *dev) | |||
853 | struct drm_i915_private *dev_priv = dev->dev_private; | 861 | struct drm_i915_private *dev_priv = dev->dev_private; |
854 | int i; | 862 | int i; |
855 | 863 | ||
856 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 864 | pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); |
857 | 865 | ||
858 | mutex_lock(&dev->struct_mutex); | 866 | mutex_lock(&dev->struct_mutex); |
859 | 867 | ||
860 | /* Hardware status page */ | 868 | /* Hardware status page */ |
861 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 869 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
870 | I915_WRITE(HWS_PGA, dev_priv->regfile.saveHWS); | ||
862 | 871 | ||
863 | i915_restore_display(dev); | 872 | i915_restore_display(dev); |
864 | 873 | ||
865 | /* Interrupt state */ | 874 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
866 | if (HAS_PCH_SPLIT(dev)) { | 875 | /* Interrupt state */ |
867 | I915_WRITE(DEIER, dev_priv->saveDEIER); | 876 | if (HAS_PCH_SPLIT(dev)) { |
868 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); | 877 | I915_WRITE(DEIER, dev_priv->regfile.saveDEIER); |
869 | I915_WRITE(GTIER, dev_priv->saveGTIER); | 878 | I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR); |
870 | I915_WRITE(GTIMR, dev_priv->saveGTIMR); | 879 | I915_WRITE(GTIER, dev_priv->regfile.saveGTIER); |
871 | I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); | 880 | I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR); |
872 | I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); | 881 | I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR); |
873 | I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); | 882 | I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR); |
874 | } else { | 883 | I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG); |
875 | I915_WRITE(IER, dev_priv->saveIER); | 884 | } else { |
876 | I915_WRITE(IMR, dev_priv->saveIMR); | 885 | I915_WRITE(IER, dev_priv->regfile.saveIER); |
886 | I915_WRITE(IMR, dev_priv->regfile.saveIMR); | ||
887 | } | ||
877 | } | 888 | } |
878 | 889 | ||
879 | /* Cache mode state */ | 890 | /* Cache mode state */ |
880 | I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); | 891 | I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); |
881 | 892 | ||
882 | /* Memory arbitration state */ | 893 | /* Memory arbitration state */ |
883 | I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); | 894 | I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); |
884 | 895 | ||
885 | for (i = 0; i < 16; i++) { | 896 | for (i = 0; i < 16; i++) { |
886 | I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); | 897 | I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]); |
887 | I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); | 898 | I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]); |
888 | } | 899 | } |
889 | for (i = 0; i < 3; i++) | 900 | for (i = 0; i < 3; i++) |
890 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); | 901 | I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]); |
891 | 902 | ||
892 | mutex_unlock(&dev->struct_mutex); | 903 | mutex_unlock(&dev->struct_mutex); |
893 | 904 | ||
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 903eebd2117a..3bf51d58319d 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, | |||
162 | if (ret) | 162 | if (ret) |
163 | return ret; | 163 | return ret; |
164 | 164 | ||
165 | if (!dev_priv->mm.l3_remap_info) { | 165 | if (!dev_priv->l3_parity.remap_info) { |
166 | temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); | 166 | temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); |
167 | if (!temp) { | 167 | if (!temp) { |
168 | mutex_unlock(&drm_dev->struct_mutex); | 168 | mutex_unlock(&drm_dev->struct_mutex); |
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj, | |||
182 | * at this point it is left as a TODO. | 182 | * at this point it is left as a TODO. |
183 | */ | 183 | */ |
184 | if (temp) | 184 | if (temp) |
185 | dev_priv->mm.l3_remap_info = temp; | 185 | dev_priv->l3_parity.remap_info = temp; |
186 | 186 | ||
187 | memcpy(dev_priv->mm.l3_remap_info + (offset/4), | 187 | memcpy(dev_priv->l3_parity.remap_info + (offset/4), |
188 | buf + (offset/4), | 188 | buf + (offset/4), |
189 | count); | 189 | count); |
190 | 190 | ||
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, | |||
211 | struct drm_i915_private *dev_priv = dev->dev_private; | 211 | struct drm_i915_private *dev_priv = dev->dev_private; |
212 | int ret; | 212 | int ret; |
213 | 213 | ||
214 | ret = i915_mutex_lock_interruptible(dev); | 214 | mutex_lock(&dev_priv->rps.hw_lock); |
215 | if (ret) | ||
216 | return ret; | ||
217 | |||
218 | ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; | 215 | ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; |
219 | mutex_unlock(&dev->struct_mutex); | 216 | mutex_unlock(&dev_priv->rps.hw_lock); |
220 | 217 | ||
221 | return snprintf(buf, PAGE_SIZE, "%d", ret); | 218 | return snprintf(buf, PAGE_SIZE, "%d", ret); |
222 | } | 219 | } |
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute | |||
228 | struct drm_i915_private *dev_priv = dev->dev_private; | 225 | struct drm_i915_private *dev_priv = dev->dev_private; |
229 | int ret; | 226 | int ret; |
230 | 227 | ||
231 | ret = i915_mutex_lock_interruptible(dev); | 228 | mutex_lock(&dev_priv->rps.hw_lock); |
232 | if (ret) | ||
233 | return ret; | ||
234 | |||
235 | ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; | 229 | ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; |
236 | mutex_unlock(&dev->struct_mutex); | 230 | mutex_unlock(&dev_priv->rps.hw_lock); |
237 | 231 | ||
238 | return snprintf(buf, PAGE_SIZE, "%d", ret); | 232 | return snprintf(buf, PAGE_SIZE, "%d", ret); |
239 | } | 233 | } |
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
254 | 248 | ||
255 | val /= GT_FREQUENCY_MULTIPLIER; | 249 | val /= GT_FREQUENCY_MULTIPLIER; |
256 | 250 | ||
257 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 251 | mutex_lock(&dev_priv->rps.hw_lock); |
258 | if (ret) | ||
259 | return ret; | ||
260 | 252 | ||
261 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 253 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
262 | hw_max = (rp_state_cap & 0xff); | 254 | hw_max = (rp_state_cap & 0xff); |
263 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | 255 | hw_min = ((rp_state_cap & 0xff0000) >> 16); |
264 | 256 | ||
265 | if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { | 257 | if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { |
266 | mutex_unlock(&dev->struct_mutex); | 258 | mutex_unlock(&dev_priv->rps.hw_lock); |
267 | return -EINVAL; | 259 | return -EINVAL; |
268 | } | 260 | } |
269 | 261 | ||
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
272 | 264 | ||
273 | dev_priv->rps.max_delay = val; | 265 | dev_priv->rps.max_delay = val; |
274 | 266 | ||
275 | mutex_unlock(&dev->struct_mutex); | 267 | mutex_unlock(&dev_priv->rps.hw_lock); |
276 | 268 | ||
277 | return count; | 269 | return count; |
278 | } | 270 | } |
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute | |||
284 | struct drm_i915_private *dev_priv = dev->dev_private; | 276 | struct drm_i915_private *dev_priv = dev->dev_private; |
285 | int ret; | 277 | int ret; |
286 | 278 | ||
287 | ret = i915_mutex_lock_interruptible(dev); | 279 | mutex_lock(&dev_priv->rps.hw_lock); |
288 | if (ret) | ||
289 | return ret; | ||
290 | |||
291 | ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; | 280 | ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; |
292 | mutex_unlock(&dev->struct_mutex); | 281 | mutex_unlock(&dev_priv->rps.hw_lock); |
293 | 282 | ||
294 | return snprintf(buf, PAGE_SIZE, "%d", ret); | 283 | return snprintf(buf, PAGE_SIZE, "%d", ret); |
295 | } | 284 | } |
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
310 | 299 | ||
311 | val /= GT_FREQUENCY_MULTIPLIER; | 300 | val /= GT_FREQUENCY_MULTIPLIER; |
312 | 301 | ||
313 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 302 | mutex_lock(&dev_priv->rps.hw_lock); |
314 | if (ret) | ||
315 | return ret; | ||
316 | 303 | ||
317 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 304 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
318 | hw_max = (rp_state_cap & 0xff); | 305 | hw_max = (rp_state_cap & 0xff); |
319 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | 306 | hw_min = ((rp_state_cap & 0xff0000) >> 16); |
320 | 307 | ||
321 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { | 308 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { |
322 | mutex_unlock(&dev->struct_mutex); | 309 | mutex_unlock(&dev_priv->rps.hw_lock); |
323 | return -EINVAL; | 310 | return -EINVAL; |
324 | } | 311 | } |
325 | 312 | ||
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
328 | 315 | ||
329 | dev_priv->rps.min_delay = val; | 316 | dev_priv->rps.min_delay = val; |
330 | 317 | ||
331 | mutex_unlock(&dev->struct_mutex); | 318 | mutex_unlock(&dev_priv->rps.hw_lock); |
332 | 319 | ||
333 | return count; | 320 | return count; |
334 | 321 | ||
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 8134421b89a6..3db4a6817713 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything, | |||
229 | ); | 229 | ); |
230 | 230 | ||
231 | TRACE_EVENT(i915_gem_ring_dispatch, | 231 | TRACE_EVENT(i915_gem_ring_dispatch, |
232 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), | 232 | TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), |
233 | TP_ARGS(ring, seqno), | 233 | TP_ARGS(ring, seqno, flags), |
234 | 234 | ||
235 | TP_STRUCT__entry( | 235 | TP_STRUCT__entry( |
236 | __field(u32, dev) | 236 | __field(u32, dev) |
237 | __field(u32, ring) | 237 | __field(u32, ring) |
238 | __field(u32, seqno) | 238 | __field(u32, seqno) |
239 | __field(u32, flags) | ||
239 | ), | 240 | ), |
240 | 241 | ||
241 | TP_fast_assign( | 242 | TP_fast_assign( |
242 | __entry->dev = ring->dev->primary->index; | 243 | __entry->dev = ring->dev->primary->index; |
243 | __entry->ring = ring->id; | 244 | __entry->ring = ring->id; |
244 | __entry->seqno = seqno; | 245 | __entry->seqno = seqno; |
246 | __entry->flags = flags; | ||
245 | i915_trace_irq_get(ring, seqno); | 247 | i915_trace_irq_get(ring, seqno); |
246 | ), | 248 | ), |
247 | 249 | ||
248 | TP_printk("dev=%u, ring=%u, seqno=%u", | 250 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", |
249 | __entry->dev, __entry->ring, __entry->seqno) | 251 | __entry->dev, __entry->ring, __entry->seqno, __entry->flags) |
250 | ); | 252 | ); |
251 | 253 | ||
252 | TRACE_EVENT(i915_gem_ring_flush, | 254 | TRACE_EVENT(i915_gem_ring_flush, |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b726b478a4f5..62a5b1154762 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -221,14 +221,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
221 | struct drm_i915_private *dev_priv = dev->dev_private; | 221 | struct drm_i915_private *dev_priv = dev->dev_private; |
222 | u32 adpa; | 222 | u32 adpa; |
223 | 223 | ||
224 | adpa = ADPA_HOTPLUG_BITS; | 224 | if (HAS_PCH_SPLIT(dev)) |
225 | adpa = ADPA_HOTPLUG_BITS; | ||
226 | else | ||
227 | adpa = 0; | ||
228 | |||
225 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 229 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
226 | adpa |= ADPA_HSYNC_ACTIVE_HIGH; | 230 | adpa |= ADPA_HSYNC_ACTIVE_HIGH; |
227 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 231 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
228 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; | 232 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; |
229 | 233 | ||
230 | /* For CPT allow 3 pipe config, for others just use A or B */ | 234 | /* For CPT allow 3 pipe config, for others just use A or B */ |
231 | if (HAS_PCH_CPT(dev)) | 235 | if (HAS_PCH_LPT(dev)) |
236 | ; /* Those bits don't exist here */ | ||
237 | else if (HAS_PCH_CPT(dev)) | ||
232 | adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); | 238 | adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); |
233 | else if (intel_crtc->pipe == 0) | 239 | else if (intel_crtc->pipe == 0) |
234 | adpa |= ADPA_PIPE_A_SELECT; | 240 | adpa |= ADPA_PIPE_A_SELECT; |
@@ -401,12 +407,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector, | |||
401 | struct i2c_adapter *adapter) | 407 | struct i2c_adapter *adapter) |
402 | { | 408 | { |
403 | struct edid *edid; | 409 | struct edid *edid; |
410 | int ret; | ||
404 | 411 | ||
405 | edid = intel_crt_get_edid(connector, adapter); | 412 | edid = intel_crt_get_edid(connector, adapter); |
406 | if (!edid) | 413 | if (!edid) |
407 | return 0; | 414 | return 0; |
408 | 415 | ||
409 | return intel_connector_update_modes(connector, edid); | 416 | ret = intel_connector_update_modes(connector, edid); |
417 | kfree(edid); | ||
418 | |||
419 | return ret; | ||
410 | } | 420 | } |
411 | 421 | ||
412 | static bool intel_crt_detect_ddc(struct drm_connector *connector) | 422 | static bool intel_crt_detect_ddc(struct drm_connector *connector) |
@@ -644,10 +654,22 @@ static int intel_crt_set_property(struct drm_connector *connector, | |||
644 | static void intel_crt_reset(struct drm_connector *connector) | 654 | static void intel_crt_reset(struct drm_connector *connector) |
645 | { | 655 | { |
646 | struct drm_device *dev = connector->dev; | 656 | struct drm_device *dev = connector->dev; |
657 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
647 | struct intel_crt *crt = intel_attached_crt(connector); | 658 | struct intel_crt *crt = intel_attached_crt(connector); |
648 | 659 | ||
649 | if (HAS_PCH_SPLIT(dev)) | 660 | if (HAS_PCH_SPLIT(dev)) { |
661 | u32 adpa; | ||
662 | |||
663 | adpa = I915_READ(PCH_ADPA); | ||
664 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | ||
665 | adpa |= ADPA_HOTPLUG_BITS; | ||
666 | I915_WRITE(PCH_ADPA, adpa); | ||
667 | POSTING_READ(PCH_ADPA); | ||
668 | |||
669 | DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); | ||
650 | crt->force_hotplug_required = 1; | 670 | crt->force_hotplug_required = 1; |
671 | } | ||
672 | |||
651 | } | 673 | } |
652 | 674 | ||
653 | /* | 675 | /* |
@@ -749,7 +771,10 @@ void intel_crt_init(struct drm_device *dev) | |||
749 | 771 | ||
750 | crt->base.disable = intel_disable_crt; | 772 | crt->base.disable = intel_disable_crt; |
751 | crt->base.enable = intel_enable_crt; | 773 | crt->base.enable = intel_enable_crt; |
752 | crt->base.get_hw_state = intel_crt_get_hw_state; | 774 | if (IS_HASWELL(dev)) |
775 | crt->base.get_hw_state = intel_ddi_get_hw_state; | ||
776 | else | ||
777 | crt->base.get_hw_state = intel_crt_get_hw_state; | ||
753 | intel_connector->get_hw_state = intel_connector_get_hw_state; | 778 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
754 | 779 | ||
755 | drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); | 780 | drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); |
@@ -766,18 +791,6 @@ void intel_crt_init(struct drm_device *dev) | |||
766 | * Configure the automatic hotplug detection stuff | 791 | * Configure the automatic hotplug detection stuff |
767 | */ | 792 | */ |
768 | crt->force_hotplug_required = 0; | 793 | crt->force_hotplug_required = 0; |
769 | if (HAS_PCH_SPLIT(dev)) { | ||
770 | u32 adpa; | ||
771 | |||
772 | adpa = I915_READ(PCH_ADPA); | ||
773 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | ||
774 | adpa |= ADPA_HOTPLUG_BITS; | ||
775 | I915_WRITE(PCH_ADPA, adpa); | ||
776 | POSTING_READ(PCH_ADPA); | ||
777 | |||
778 | DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); | ||
779 | crt->force_hotplug_required = 1; | ||
780 | } | ||
781 | 794 | ||
782 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | 795 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; |
783 | } | 796 | } |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index bfe375466a0e..58f50ebdbef6 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -58,6 +58,26 @@ static const u32 hsw_ddi_translations_fdi[] = { | |||
58 | 0x00FFFFFF, 0x00040006 /* HDMI parameters */ | 58 | 0x00FFFFFF, 0x00040006 /* HDMI parameters */ |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) | ||
62 | { | ||
63 | struct drm_encoder *encoder = &intel_encoder->base; | ||
64 | int type = intel_encoder->type; | ||
65 | |||
66 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || | ||
67 | type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { | ||
68 | struct intel_digital_port *intel_dig_port = | ||
69 | enc_to_dig_port(encoder); | ||
70 | return intel_dig_port->port; | ||
71 | |||
72 | } else if (type == INTEL_OUTPUT_ANALOG) { | ||
73 | return PORT_E; | ||
74 | |||
75 | } else { | ||
76 | DRM_ERROR("Invalid DDI encoder type %d\n", type); | ||
77 | BUG(); | ||
78 | } | ||
79 | } | ||
80 | |||
61 | /* On Haswell, DDI port buffers must be programmed with correct values | 81 | /* On Haswell, DDI port buffers must be programmed with correct values |
62 | * in advance. The buffer values are different for FDI and DP modes, | 82 | * in advance. The buffer values are different for FDI and DP modes, |
63 | * but the HDMI/DVI fields are shared among those. So we program the DDI | 83 | * but the HDMI/DVI fields are shared among those. So we program the DDI |
@@ -133,25 +153,34 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) | |||
133 | struct drm_device *dev = crtc->dev; | 153 | struct drm_device *dev = crtc->dev; |
134 | struct drm_i915_private *dev_priv = dev->dev_private; | 154 | struct drm_i915_private *dev_priv = dev->dev_private; |
135 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 155 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
136 | int pipe = intel_crtc->pipe; | 156 | u32 temp, i, rx_ctl_val; |
137 | u32 reg, temp, i; | ||
138 | |||
139 | /* Configure CPU PLL, wait for warmup */ | ||
140 | I915_WRITE(SPLL_CTL, | ||
141 | SPLL_PLL_ENABLE | | ||
142 | SPLL_PLL_FREQ_1350MHz | | ||
143 | SPLL_PLL_SCC); | ||
144 | 157 | ||
145 | /* Use SPLL to drive the output when in FDI mode */ | 158 | /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the |
146 | I915_WRITE(PORT_CLK_SEL(PORT_E), | 159 | * mode set "sequence for CRT port" document: |
147 | PORT_CLK_SEL_SPLL); | 160 | * - TP1 to TP2 time with the default value |
148 | I915_WRITE(PIPE_CLK_SEL(pipe), | 161 | * - FDI delay to 90h |
149 | PIPE_CLK_SEL_PORT(PORT_E)); | 162 | */ |
150 | 163 | I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) | | |
151 | udelay(20); | 164 | FDI_RX_PWRDN_LANE0_VAL(2) | |
152 | 165 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); | |
153 | /* Start the training iterating through available voltages and emphasis */ | 166 | |
154 | for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) { | 167 | /* Enable the PCH Receiver FDI PLL */ |
168 | rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE | | ||
169 | ((intel_crtc->fdi_lanes - 1) << 19); | ||
170 | I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); | ||
171 | POSTING_READ(_FDI_RXA_CTL); | ||
172 | udelay(220); | ||
173 | |||
174 | /* Switch from Rawclk to PCDclk */ | ||
175 | rx_ctl_val |= FDI_PCDCLK; | ||
176 | I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); | ||
177 | |||
178 | /* Configure Port Clock Select */ | ||
179 | I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); | ||
180 | |||
181 | /* Start the training iterating through available voltages and emphasis, | ||
182 | * testing each value twice. */ | ||
183 | for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) { | ||
155 | /* Configure DP_TP_CTL with auto-training */ | 184 | /* Configure DP_TP_CTL with auto-training */ |
156 | I915_WRITE(DP_TP_CTL(PORT_E), | 185 | I915_WRITE(DP_TP_CTL(PORT_E), |
157 | DP_TP_CTL_FDI_AUTOTRAIN | | 186 | DP_TP_CTL_FDI_AUTOTRAIN | |
@@ -160,103 +189,63 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) | |||
160 | DP_TP_CTL_ENABLE); | 189 | DP_TP_CTL_ENABLE); |
161 | 190 | ||
162 | /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ | 191 | /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ |
163 | temp = I915_READ(DDI_BUF_CTL(PORT_E)); | ||
164 | temp = (temp & ~DDI_BUF_EMP_MASK); | ||
165 | I915_WRITE(DDI_BUF_CTL(PORT_E), | 192 | I915_WRITE(DDI_BUF_CTL(PORT_E), |
166 | temp | | 193 | DDI_BUF_CTL_ENABLE | |
167 | DDI_BUF_CTL_ENABLE | | 194 | ((intel_crtc->fdi_lanes - 1) << 1) | |
168 | DDI_PORT_WIDTH_X2 | | 195 | hsw_ddi_buf_ctl_values[i / 2]); |
169 | hsw_ddi_buf_ctl_values[i]); | 196 | POSTING_READ(DDI_BUF_CTL(PORT_E)); |
170 | 197 | ||
171 | udelay(600); | 198 | udelay(600); |
172 | 199 | ||
173 | /* We need to program FDI_RX_MISC with the default TP1 to TP2 | 200 | /* Program PCH FDI Receiver TU */ |
174 | * values before enabling the receiver, and configure the delay | 201 | I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64)); |
175 | * for the FDI timing generator to 90h. Luckily, all the other | 202 | |
176 | * bits are supposed to be zeroed, so we can write those values | 203 | /* Enable PCH FDI Receiver with auto-training */ |
177 | * directly. | 204 | rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; |
178 | */ | 205 | I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); |
179 | I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | | 206 | POSTING_READ(_FDI_RXA_CTL); |
180 | FDI_RX_FDI_DELAY_90); | 207 | |
181 | 208 | /* Wait for FDI receiver lane calibration */ | |
182 | /* Enable CPU FDI Receiver with auto-training */ | 209 | udelay(30); |
183 | reg = FDI_RX_CTL(pipe); | 210 | |
184 | I915_WRITE(reg, | 211 | /* Unset FDI_RX_MISC pwrdn lanes */ |
185 | I915_READ(reg) | | 212 | temp = I915_READ(_FDI_RXA_MISC); |
186 | FDI_LINK_TRAIN_AUTO | | 213 | temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); |
187 | FDI_RX_ENABLE | | 214 | I915_WRITE(_FDI_RXA_MISC, temp); |
188 | FDI_LINK_TRAIN_PATTERN_1_CPT | | 215 | POSTING_READ(_FDI_RXA_MISC); |
189 | FDI_RX_ENHANCE_FRAME_ENABLE | | 216 | |
190 | FDI_PORT_WIDTH_2X_LPT | | 217 | /* Wait for FDI auto training time */ |
191 | FDI_RX_PLL_ENABLE); | 218 | udelay(5); |
192 | POSTING_READ(reg); | ||
193 | udelay(100); | ||
194 | 219 | ||
195 | temp = I915_READ(DP_TP_STATUS(PORT_E)); | 220 | temp = I915_READ(DP_TP_STATUS(PORT_E)); |
196 | if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { | 221 | if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { |
197 | DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i); | 222 | DRM_DEBUG_KMS("FDI link training done on step %d\n", i); |
198 | 223 | ||
199 | /* Enable normal pixel sending for FDI */ | 224 | /* Enable normal pixel sending for FDI */ |
200 | I915_WRITE(DP_TP_CTL(PORT_E), | 225 | I915_WRITE(DP_TP_CTL(PORT_E), |
201 | DP_TP_CTL_FDI_AUTOTRAIN | | 226 | DP_TP_CTL_FDI_AUTOTRAIN | |
202 | DP_TP_CTL_LINK_TRAIN_NORMAL | | 227 | DP_TP_CTL_LINK_TRAIN_NORMAL | |
203 | DP_TP_CTL_ENHANCED_FRAME_ENABLE | | 228 | DP_TP_CTL_ENHANCED_FRAME_ENABLE | |
204 | DP_TP_CTL_ENABLE); | 229 | DP_TP_CTL_ENABLE); |
205 | |||
206 | /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */ | ||
207 | temp = I915_READ(DDI_FUNC_CTL(pipe)); | ||
208 | temp &= ~PIPE_DDI_PORT_MASK; | ||
209 | temp |= PIPE_DDI_SELECT_PORT(PORT_E) | | ||
210 | PIPE_DDI_MODE_SELECT_FDI | | ||
211 | PIPE_DDI_FUNC_ENABLE | | ||
212 | PIPE_DDI_PORT_WIDTH_X2; | ||
213 | I915_WRITE(DDI_FUNC_CTL(pipe), | ||
214 | temp); | ||
215 | break; | ||
216 | } else { | ||
217 | DRM_ERROR("Error training BUF_CTL %d\n", i); | ||
218 | 230 | ||
219 | /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */ | 231 | return; |
220 | I915_WRITE(DP_TP_CTL(PORT_E), | ||
221 | I915_READ(DP_TP_CTL(PORT_E)) & | ||
222 | ~DP_TP_CTL_ENABLE); | ||
223 | I915_WRITE(FDI_RX_CTL(pipe), | ||
224 | I915_READ(FDI_RX_CTL(pipe)) & | ||
225 | ~FDI_RX_PLL_ENABLE); | ||
226 | continue; | ||
227 | } | 232 | } |
228 | } | ||
229 | 233 | ||
230 | DRM_DEBUG_KMS("FDI train done.\n"); | 234 | /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ |
231 | } | 235 | I915_WRITE(DP_TP_CTL(PORT_E), |
236 | I915_READ(DP_TP_CTL(PORT_E)) & ~DP_TP_CTL_ENABLE); | ||
232 | 237 | ||
233 | /* For DDI connections, it is possible to support different outputs over the | 238 | rx_ctl_val &= ~FDI_RX_ENABLE; |
234 | * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by | 239 | I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); |
235 | * the time the output is detected what exactly is on the other end of it. This | ||
236 | * function aims at providing support for this detection and proper output | ||
237 | * configuration. | ||
238 | */ | ||
239 | void intel_ddi_init(struct drm_device *dev, enum port port) | ||
240 | { | ||
241 | /* For now, we don't do any proper output detection and assume that we | ||
242 | * handle HDMI only */ | ||
243 | 240 | ||
244 | switch(port){ | 241 | /* Reset FDI_RX_MISC pwrdn lanes */ |
245 | case PORT_A: | 242 | temp = I915_READ(_FDI_RXA_MISC); |
246 | /* We don't handle eDP and DP yet */ | 243 | temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); |
247 | DRM_DEBUG_DRIVER("Found digital output on DDI port A\n"); | 244 | temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); |
248 | break; | 245 | I915_WRITE(_FDI_RXA_MISC, temp); |
249 | /* Assume that the ports B, C and D are working in HDMI mode for now */ | ||
250 | case PORT_B: | ||
251 | case PORT_C: | ||
252 | case PORT_D: | ||
253 | intel_hdmi_init(dev, DDI_BUF_CTL(port), port); | ||
254 | break; | ||
255 | default: | ||
256 | DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", | ||
257 | port); | ||
258 | break; | ||
259 | } | 246 | } |
247 | |||
248 | DRM_ERROR("FDI link training failed!\n"); | ||
260 | } | 249 | } |
261 | 250 | ||
262 | /* WRPLL clock dividers */ | 251 | /* WRPLL clock dividers */ |
@@ -645,116 +634,426 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { | |||
645 | {298000, 2, 21, 19}, | 634 | {298000, 2, 21, 19}, |
646 | }; | 635 | }; |
647 | 636 | ||
648 | void intel_ddi_mode_set(struct drm_encoder *encoder, | 637 | static void intel_ddi_mode_set(struct drm_encoder *encoder, |
649 | struct drm_display_mode *mode, | 638 | struct drm_display_mode *mode, |
650 | struct drm_display_mode *adjusted_mode) | 639 | struct drm_display_mode *adjusted_mode) |
651 | { | 640 | { |
652 | struct drm_device *dev = encoder->dev; | ||
653 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
654 | struct drm_crtc *crtc = encoder->crtc; | 641 | struct drm_crtc *crtc = encoder->crtc; |
655 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 642 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
656 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 643 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
657 | int port = intel_hdmi->ddi_port; | 644 | int port = intel_ddi_get_encoder_port(intel_encoder); |
658 | int pipe = intel_crtc->pipe; | 645 | int pipe = intel_crtc->pipe; |
659 | int p, n2, r2; | 646 | int type = intel_encoder->type; |
660 | u32 temp, i; | ||
661 | 647 | ||
662 | /* On Haswell, we need to enable the clocks and prepare DDI function to | 648 | DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", |
663 | * work in HDMI mode for this pipe. | 649 | port_name(port), pipe_name(pipe)); |
664 | */ | 650 | |
665 | DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); | 651 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
652 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
653 | |||
654 | intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; | ||
655 | switch (intel_dp->lane_count) { | ||
656 | case 1: | ||
657 | intel_dp->DP |= DDI_PORT_WIDTH_X1; | ||
658 | break; | ||
659 | case 2: | ||
660 | intel_dp->DP |= DDI_PORT_WIDTH_X2; | ||
661 | break; | ||
662 | case 4: | ||
663 | intel_dp->DP |= DDI_PORT_WIDTH_X4; | ||
664 | break; | ||
665 | default: | ||
666 | intel_dp->DP |= DDI_PORT_WIDTH_X4; | ||
667 | WARN(1, "Unexpected DP lane count %d\n", | ||
668 | intel_dp->lane_count); | ||
669 | break; | ||
670 | } | ||
671 | |||
672 | intel_dp_init_link_config(intel_dp); | ||
673 | |||
674 | } else if (type == INTEL_OUTPUT_HDMI) { | ||
675 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
676 | |||
677 | if (intel_hdmi->has_audio) { | ||
678 | /* Proper support for digital audio needs a new logic | ||
679 | * and a new set of registers, so we leave it for future | ||
680 | * patch bombing. | ||
681 | */ | ||
682 | DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", | ||
683 | pipe_name(intel_crtc->pipe)); | ||
684 | |||
685 | /* write eld */ | ||
686 | DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); | ||
687 | intel_write_eld(encoder, adjusted_mode); | ||
688 | } | ||
689 | |||
690 | intel_hdmi->set_infoframes(encoder, adjusted_mode); | ||
691 | } | ||
692 | } | ||
693 | |||
694 | static struct intel_encoder * | ||
695 | intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) | ||
696 | { | ||
697 | struct drm_device *dev = crtc->dev; | ||
698 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
699 | struct intel_encoder *intel_encoder, *ret = NULL; | ||
700 | int num_encoders = 0; | ||
701 | |||
702 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | ||
703 | ret = intel_encoder; | ||
704 | num_encoders++; | ||
705 | } | ||
706 | |||
707 | if (num_encoders != 1) | ||
708 | WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders, | ||
709 | intel_crtc->pipe); | ||
710 | |||
711 | BUG_ON(ret == NULL); | ||
712 | return ret; | ||
713 | } | ||
714 | |||
715 | void intel_ddi_put_crtc_pll(struct drm_crtc *crtc) | ||
716 | { | ||
717 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | ||
718 | struct intel_ddi_plls *plls = &dev_priv->ddi_plls; | ||
719 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
720 | uint32_t val; | ||
721 | |||
722 | switch (intel_crtc->ddi_pll_sel) { | ||
723 | case PORT_CLK_SEL_SPLL: | ||
724 | plls->spll_refcount--; | ||
725 | if (plls->spll_refcount == 0) { | ||
726 | DRM_DEBUG_KMS("Disabling SPLL\n"); | ||
727 | val = I915_READ(SPLL_CTL); | ||
728 | WARN_ON(!(val & SPLL_PLL_ENABLE)); | ||
729 | I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); | ||
730 | POSTING_READ(SPLL_CTL); | ||
731 | } | ||
732 | break; | ||
733 | case PORT_CLK_SEL_WRPLL1: | ||
734 | plls->wrpll1_refcount--; | ||
735 | if (plls->wrpll1_refcount == 0) { | ||
736 | DRM_DEBUG_KMS("Disabling WRPLL 1\n"); | ||
737 | val = I915_READ(WRPLL_CTL1); | ||
738 | WARN_ON(!(val & WRPLL_PLL_ENABLE)); | ||
739 | I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE); | ||
740 | POSTING_READ(WRPLL_CTL1); | ||
741 | } | ||
742 | break; | ||
743 | case PORT_CLK_SEL_WRPLL2: | ||
744 | plls->wrpll2_refcount--; | ||
745 | if (plls->wrpll2_refcount == 0) { | ||
746 | DRM_DEBUG_KMS("Disabling WRPLL 2\n"); | ||
747 | val = I915_READ(WRPLL_CTL2); | ||
748 | WARN_ON(!(val & WRPLL_PLL_ENABLE)); | ||
749 | I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE); | ||
750 | POSTING_READ(WRPLL_CTL2); | ||
751 | } | ||
752 | break; | ||
753 | } | ||
754 | |||
755 | WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n"); | ||
756 | WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n"); | ||
757 | WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n"); | ||
758 | |||
759 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; | ||
760 | } | ||
761 | |||
762 | static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2) | ||
763 | { | ||
764 | u32 i; | ||
666 | 765 | ||
667 | for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) | 766 | for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) |
668 | if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock) | 767 | if (clock <= wrpll_tmds_clock_table[i].clock) |
669 | break; | 768 | break; |
670 | 769 | ||
671 | if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) | 770 | if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) |
672 | i--; | 771 | i--; |
673 | 772 | ||
674 | p = wrpll_tmds_clock_table[i].p; | 773 | *p = wrpll_tmds_clock_table[i].p; |
675 | n2 = wrpll_tmds_clock_table[i].n2; | 774 | *n2 = wrpll_tmds_clock_table[i].n2; |
676 | r2 = wrpll_tmds_clock_table[i].r2; | 775 | *r2 = wrpll_tmds_clock_table[i].r2; |
677 | 776 | ||
678 | if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock) | 777 | if (wrpll_tmds_clock_table[i].clock != clock) |
679 | DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n", | 778 | DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n", |
680 | wrpll_tmds_clock_table[i].clock, crtc->mode.clock); | 779 | wrpll_tmds_clock_table[i].clock, clock); |
681 | 780 | ||
682 | DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", | 781 | DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", |
683 | crtc->mode.clock, p, n2, r2); | 782 | clock, *p, *n2, *r2); |
783 | } | ||
684 | 784 | ||
685 | /* Enable LCPLL if disabled */ | 785 | bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock) |
686 | temp = I915_READ(LCPLL_CTL); | 786 | { |
687 | if (temp & LCPLL_PLL_DISABLE) | 787 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
688 | I915_WRITE(LCPLL_CTL, | 788 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); |
689 | temp & ~LCPLL_PLL_DISABLE); | 789 | struct drm_encoder *encoder = &intel_encoder->base; |
790 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | ||
791 | struct intel_ddi_plls *plls = &dev_priv->ddi_plls; | ||
792 | int type = intel_encoder->type; | ||
793 | enum pipe pipe = intel_crtc->pipe; | ||
794 | uint32_t reg, val; | ||
690 | 795 | ||
691 | /* Configure WR PLL 1, program the correct divider values for | 796 | /* TODO: reuse PLLs when possible (compare values) */ |
692 | * the desired frequency and wait for warmup */ | ||
693 | I915_WRITE(WRPLL_CTL1, | ||
694 | WRPLL_PLL_ENABLE | | ||
695 | WRPLL_PLL_SELECT_LCPLL_2700 | | ||
696 | WRPLL_DIVIDER_REFERENCE(r2) | | ||
697 | WRPLL_DIVIDER_FEEDBACK(n2) | | ||
698 | WRPLL_DIVIDER_POST(p)); | ||
699 | 797 | ||
700 | udelay(20); | 798 | intel_ddi_put_crtc_pll(crtc); |
701 | 799 | ||
702 | /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use | 800 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
703 | * this port for connection. | 801 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
704 | */ | 802 | |
705 | I915_WRITE(PORT_CLK_SEL(port), | 803 | switch (intel_dp->link_bw) { |
706 | PORT_CLK_SEL_WRPLL1); | 804 | case DP_LINK_BW_1_62: |
707 | I915_WRITE(PIPE_CLK_SEL(pipe), | 805 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; |
708 | PIPE_CLK_SEL_PORT(port)); | 806 | break; |
807 | case DP_LINK_BW_2_7: | ||
808 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; | ||
809 | break; | ||
810 | case DP_LINK_BW_5_4: | ||
811 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; | ||
812 | break; | ||
813 | default: | ||
814 | DRM_ERROR("Link bandwidth %d unsupported\n", | ||
815 | intel_dp->link_bw); | ||
816 | return false; | ||
817 | } | ||
818 | |||
819 | /* We don't need to turn any PLL on because we'll use LCPLL. */ | ||
820 | return true; | ||
821 | |||
822 | } else if (type == INTEL_OUTPUT_HDMI) { | ||
823 | int p, n2, r2; | ||
824 | |||
825 | if (plls->wrpll1_refcount == 0) { | ||
826 | DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", | ||
827 | pipe_name(pipe)); | ||
828 | plls->wrpll1_refcount++; | ||
829 | reg = WRPLL_CTL1; | ||
830 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; | ||
831 | } else if (plls->wrpll2_refcount == 0) { | ||
832 | DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", | ||
833 | pipe_name(pipe)); | ||
834 | plls->wrpll2_refcount++; | ||
835 | reg = WRPLL_CTL2; | ||
836 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; | ||
837 | } else { | ||
838 | DRM_ERROR("No WRPLLs available!\n"); | ||
839 | return false; | ||
840 | } | ||
841 | |||
842 | WARN(I915_READ(reg) & WRPLL_PLL_ENABLE, | ||
843 | "WRPLL already enabled\n"); | ||
709 | 844 | ||
845 | intel_ddi_calculate_wrpll(clock, &p, &n2, &r2); | ||
846 | |||
847 | val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | | ||
848 | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | | ||
849 | WRPLL_DIVIDER_POST(p); | ||
850 | |||
851 | } else if (type == INTEL_OUTPUT_ANALOG) { | ||
852 | if (plls->spll_refcount == 0) { | ||
853 | DRM_DEBUG_KMS("Using SPLL on pipe %c\n", | ||
854 | pipe_name(pipe)); | ||
855 | plls->spll_refcount++; | ||
856 | reg = SPLL_CTL; | ||
857 | intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; | ||
858 | } | ||
859 | |||
860 | WARN(I915_READ(reg) & SPLL_PLL_ENABLE, | ||
861 | "SPLL already enabled\n"); | ||
862 | |||
863 | val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; | ||
864 | |||
865 | } else { | ||
866 | WARN(1, "Invalid DDI encoder type %d\n", type); | ||
867 | return false; | ||
868 | } | ||
869 | |||
870 | I915_WRITE(reg, val); | ||
710 | udelay(20); | 871 | udelay(20); |
711 | 872 | ||
712 | if (intel_hdmi->has_audio) { | 873 | return true; |
713 | /* Proper support for digital audio needs a new logic and a new set | 874 | } |
714 | * of registers, so we leave it for future patch bombing. | 875 | |
715 | */ | 876 | void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) |
716 | DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", | 877 | { |
717 | pipe_name(intel_crtc->pipe)); | 878 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
879 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
880 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); | ||
881 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
882 | int type = intel_encoder->type; | ||
883 | uint32_t temp; | ||
718 | 884 | ||
719 | /* write eld */ | 885 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
720 | DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); | 886 | |
721 | intel_write_eld(encoder, adjusted_mode); | 887 | temp = TRANS_MSA_SYNC_CLK; |
888 | switch (intel_crtc->bpp) { | ||
889 | case 18: | ||
890 | temp |= TRANS_MSA_6_BPC; | ||
891 | break; | ||
892 | case 24: | ||
893 | temp |= TRANS_MSA_8_BPC; | ||
894 | break; | ||
895 | case 30: | ||
896 | temp |= TRANS_MSA_10_BPC; | ||
897 | break; | ||
898 | case 36: | ||
899 | temp |= TRANS_MSA_12_BPC; | ||
900 | break; | ||
901 | default: | ||
902 | temp |= TRANS_MSA_8_BPC; | ||
903 | WARN(1, "%d bpp unsupported by DDI function\n", | ||
904 | intel_crtc->bpp); | ||
905 | } | ||
906 | I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); | ||
722 | } | 907 | } |
908 | } | ||
723 | 909 | ||
724 | /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ | 910 | void intel_ddi_enable_pipe_func(struct drm_crtc *crtc) |
725 | temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port); | 911 | { |
912 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
913 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); | ||
914 | struct drm_encoder *encoder = &intel_encoder->base; | ||
915 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | ||
916 | enum pipe pipe = intel_crtc->pipe; | ||
917 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
918 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | ||
919 | int type = intel_encoder->type; | ||
920 | uint32_t temp; | ||
921 | |||
922 | /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ | ||
923 | temp = TRANS_DDI_FUNC_ENABLE; | ||
924 | temp |= TRANS_DDI_SELECT_PORT(port); | ||
726 | 925 | ||
727 | switch (intel_crtc->bpp) { | 926 | switch (intel_crtc->bpp) { |
728 | case 18: | 927 | case 18: |
729 | temp |= PIPE_DDI_BPC_6; | 928 | temp |= TRANS_DDI_BPC_6; |
730 | break; | 929 | break; |
731 | case 24: | 930 | case 24: |
732 | temp |= PIPE_DDI_BPC_8; | 931 | temp |= TRANS_DDI_BPC_8; |
733 | break; | 932 | break; |
734 | case 30: | 933 | case 30: |
735 | temp |= PIPE_DDI_BPC_10; | 934 | temp |= TRANS_DDI_BPC_10; |
736 | break; | 935 | break; |
737 | case 36: | 936 | case 36: |
738 | temp |= PIPE_DDI_BPC_12; | 937 | temp |= TRANS_DDI_BPC_12; |
739 | break; | 938 | break; |
740 | default: | 939 | default: |
741 | WARN(1, "%d bpp unsupported by pipe DDI function\n", | 940 | WARN(1, "%d bpp unsupported by transcoder DDI function\n", |
742 | intel_crtc->bpp); | 941 | intel_crtc->bpp); |
743 | } | 942 | } |
744 | 943 | ||
745 | if (intel_hdmi->has_hdmi_sink) | 944 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) |
746 | temp |= PIPE_DDI_MODE_SELECT_HDMI; | 945 | temp |= TRANS_DDI_PVSYNC; |
946 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | ||
947 | temp |= TRANS_DDI_PHSYNC; | ||
948 | |||
949 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
950 | switch (pipe) { | ||
951 | case PIPE_A: | ||
952 | temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; | ||
953 | break; | ||
954 | case PIPE_B: | ||
955 | temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; | ||
956 | break; | ||
957 | case PIPE_C: | ||
958 | temp |= TRANS_DDI_EDP_INPUT_C_ONOFF; | ||
959 | break; | ||
960 | default: | ||
961 | BUG(); | ||
962 | break; | ||
963 | } | ||
964 | } | ||
965 | |||
966 | if (type == INTEL_OUTPUT_HDMI) { | ||
967 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
968 | |||
969 | if (intel_hdmi->has_hdmi_sink) | ||
970 | temp |= TRANS_DDI_MODE_SELECT_HDMI; | ||
971 | else | ||
972 | temp |= TRANS_DDI_MODE_SELECT_DVI; | ||
973 | |||
974 | } else if (type == INTEL_OUTPUT_ANALOG) { | ||
975 | temp |= TRANS_DDI_MODE_SELECT_FDI; | ||
976 | temp |= (intel_crtc->fdi_lanes - 1) << 1; | ||
977 | |||
978 | } else if (type == INTEL_OUTPUT_DISPLAYPORT || | ||
979 | type == INTEL_OUTPUT_EDP) { | ||
980 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
981 | |||
982 | temp |= TRANS_DDI_MODE_SELECT_DP_SST; | ||
983 | |||
984 | switch (intel_dp->lane_count) { | ||
985 | case 1: | ||
986 | temp |= TRANS_DDI_PORT_WIDTH_X1; | ||
987 | break; | ||
988 | case 2: | ||
989 | temp |= TRANS_DDI_PORT_WIDTH_X2; | ||
990 | break; | ||
991 | case 4: | ||
992 | temp |= TRANS_DDI_PORT_WIDTH_X4; | ||
993 | break; | ||
994 | default: | ||
995 | temp |= TRANS_DDI_PORT_WIDTH_X4; | ||
996 | WARN(1, "Unsupported lane count %d\n", | ||
997 | intel_dp->lane_count); | ||
998 | } | ||
999 | |||
1000 | } else { | ||
1001 | WARN(1, "Invalid encoder type %d for pipe %d\n", | ||
1002 | intel_encoder->type, pipe); | ||
1003 | } | ||
1004 | |||
1005 | I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); | ||
1006 | } | ||
1007 | |||
1008 | void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, | ||
1009 | enum transcoder cpu_transcoder) | ||
1010 | { | ||
1011 | uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); | ||
1012 | uint32_t val = I915_READ(reg); | ||
1013 | |||
1014 | val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK); | ||
1015 | val |= TRANS_DDI_PORT_NONE; | ||
1016 | I915_WRITE(reg, val); | ||
1017 | } | ||
1018 | |||
1019 | bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) | ||
1020 | { | ||
1021 | struct drm_device *dev = intel_connector->base.dev; | ||
1022 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1023 | struct intel_encoder *intel_encoder = intel_connector->encoder; | ||
1024 | int type = intel_connector->base.connector_type; | ||
1025 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | ||
1026 | enum pipe pipe = 0; | ||
1027 | enum transcoder cpu_transcoder; | ||
1028 | uint32_t tmp; | ||
1029 | |||
1030 | if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) | ||
1031 | return false; | ||
1032 | |||
1033 | if (port == PORT_A) | ||
1034 | cpu_transcoder = TRANSCODER_EDP; | ||
747 | else | 1035 | else |
748 | temp |= PIPE_DDI_MODE_SELECT_DVI; | 1036 | cpu_transcoder = pipe; |
1037 | |||
1038 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); | ||
749 | 1039 | ||
750 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 1040 | switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { |
751 | temp |= PIPE_DDI_PVSYNC; | 1041 | case TRANS_DDI_MODE_SELECT_HDMI: |
752 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 1042 | case TRANS_DDI_MODE_SELECT_DVI: |
753 | temp |= PIPE_DDI_PHSYNC; | 1043 | return (type == DRM_MODE_CONNECTOR_HDMIA); |
1044 | |||
1045 | case TRANS_DDI_MODE_SELECT_DP_SST: | ||
1046 | if (type == DRM_MODE_CONNECTOR_eDP) | ||
1047 | return true; | ||
1048 | case TRANS_DDI_MODE_SELECT_DP_MST: | ||
1049 | return (type == DRM_MODE_CONNECTOR_DisplayPort); | ||
754 | 1050 | ||
755 | I915_WRITE(DDI_FUNC_CTL(pipe), temp); | 1051 | case TRANS_DDI_MODE_SELECT_FDI: |
1052 | return (type == DRM_MODE_CONNECTOR_VGA); | ||
756 | 1053 | ||
757 | intel_hdmi->set_infoframes(encoder, adjusted_mode); | 1054 | default: |
1055 | return false; | ||
1056 | } | ||
758 | } | 1057 | } |
759 | 1058 | ||
760 | bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | 1059 | bool intel_ddi_get_hw_state(struct intel_encoder *encoder, |
@@ -762,58 +1061,430 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | |||
762 | { | 1061 | { |
763 | struct drm_device *dev = encoder->base.dev; | 1062 | struct drm_device *dev = encoder->base.dev; |
764 | struct drm_i915_private *dev_priv = dev->dev_private; | 1063 | struct drm_i915_private *dev_priv = dev->dev_private; |
765 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1064 | enum port port = intel_ddi_get_encoder_port(encoder); |
766 | u32 tmp; | 1065 | u32 tmp; |
767 | int i; | 1066 | int i; |
768 | 1067 | ||
769 | tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port)); | 1068 | tmp = I915_READ(DDI_BUF_CTL(port)); |
770 | 1069 | ||
771 | if (!(tmp & DDI_BUF_CTL_ENABLE)) | 1070 | if (!(tmp & DDI_BUF_CTL_ENABLE)) |
772 | return false; | 1071 | return false; |
773 | 1072 | ||
774 | for_each_pipe(i) { | 1073 | if (port == PORT_A) { |
775 | tmp = I915_READ(DDI_FUNC_CTL(i)); | 1074 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); |
776 | 1075 | ||
777 | if ((tmp & PIPE_DDI_PORT_MASK) | 1076 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { |
778 | == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) { | 1077 | case TRANS_DDI_EDP_INPUT_A_ON: |
779 | *pipe = i; | 1078 | case TRANS_DDI_EDP_INPUT_A_ONOFF: |
780 | return true; | 1079 | *pipe = PIPE_A; |
1080 | break; | ||
1081 | case TRANS_DDI_EDP_INPUT_B_ONOFF: | ||
1082 | *pipe = PIPE_B; | ||
1083 | break; | ||
1084 | case TRANS_DDI_EDP_INPUT_C_ONOFF: | ||
1085 | *pipe = PIPE_C; | ||
1086 | break; | ||
1087 | } | ||
1088 | |||
1089 | return true; | ||
1090 | } else { | ||
1091 | for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) { | ||
1092 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(i)); | ||
1093 | |||
1094 | if ((tmp & TRANS_DDI_PORT_MASK) | ||
1095 | == TRANS_DDI_SELECT_PORT(port)) { | ||
1096 | *pipe = i; | ||
1097 | return true; | ||
1098 | } | ||
781 | } | 1099 | } |
782 | } | 1100 | } |
783 | 1101 | ||
784 | DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port); | 1102 | DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port); |
785 | 1103 | ||
786 | return true; | 1104 | return true; |
787 | } | 1105 | } |
788 | 1106 | ||
789 | void intel_enable_ddi(struct intel_encoder *encoder) | 1107 | static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv, |
1108 | enum pipe pipe) | ||
1109 | { | ||
1110 | uint32_t temp, ret; | ||
1111 | enum port port; | ||
1112 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
1113 | pipe); | ||
1114 | int i; | ||
1115 | |||
1116 | if (cpu_transcoder == TRANSCODER_EDP) { | ||
1117 | port = PORT_A; | ||
1118 | } else { | ||
1119 | temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); | ||
1120 | temp &= TRANS_DDI_PORT_MASK; | ||
1121 | |||
1122 | for (i = PORT_B; i <= PORT_E; i++) | ||
1123 | if (temp == TRANS_DDI_SELECT_PORT(i)) | ||
1124 | port = i; | ||
1125 | } | ||
1126 | |||
1127 | ret = I915_READ(PORT_CLK_SEL(port)); | ||
1128 | |||
1129 | DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n", | ||
1130 | pipe_name(pipe), port_name(port), ret); | ||
1131 | |||
1132 | return ret; | ||
1133 | } | ||
1134 | |||
1135 | void intel_ddi_setup_hw_pll_state(struct drm_device *dev) | ||
790 | { | 1136 | { |
791 | struct drm_device *dev = encoder->base.dev; | ||
792 | struct drm_i915_private *dev_priv = dev->dev_private; | 1137 | struct drm_i915_private *dev_priv = dev->dev_private; |
793 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1138 | enum pipe pipe; |
794 | int port = intel_hdmi->ddi_port; | 1139 | struct intel_crtc *intel_crtc; |
795 | u32 temp; | ||
796 | 1140 | ||
797 | temp = I915_READ(DDI_BUF_CTL(port)); | 1141 | for_each_pipe(pipe) { |
798 | temp |= DDI_BUF_CTL_ENABLE; | 1142 | intel_crtc = |
1143 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | ||
799 | 1144 | ||
800 | /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, | 1145 | if (!intel_crtc->active) |
801 | * and swing/emphasis values are ignored so nothing special needs | 1146 | continue; |
802 | * to be done besides enabling the port. | 1147 | |
803 | */ | 1148 | intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, |
804 | I915_WRITE(DDI_BUF_CTL(port), temp); | 1149 | pipe); |
1150 | |||
1151 | switch (intel_crtc->ddi_pll_sel) { | ||
1152 | case PORT_CLK_SEL_SPLL: | ||
1153 | dev_priv->ddi_plls.spll_refcount++; | ||
1154 | break; | ||
1155 | case PORT_CLK_SEL_WRPLL1: | ||
1156 | dev_priv->ddi_plls.wrpll1_refcount++; | ||
1157 | break; | ||
1158 | case PORT_CLK_SEL_WRPLL2: | ||
1159 | dev_priv->ddi_plls.wrpll2_refcount++; | ||
1160 | break; | ||
1161 | } | ||
1162 | } | ||
805 | } | 1163 | } |
806 | 1164 | ||
807 | void intel_disable_ddi(struct intel_encoder *encoder) | 1165 | void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) |
1166 | { | ||
1167 | struct drm_crtc *crtc = &intel_crtc->base; | ||
1168 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | ||
1169 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); | ||
1170 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | ||
1171 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
1172 | |||
1173 | if (cpu_transcoder != TRANSCODER_EDP) | ||
1174 | I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), | ||
1175 | TRANS_CLK_SEL_PORT(port)); | ||
1176 | } | ||
1177 | |||
1178 | void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) | ||
1179 | { | ||
1180 | struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; | ||
1181 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
1182 | |||
1183 | if (cpu_transcoder != TRANSCODER_EDP) | ||
1184 | I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), | ||
1185 | TRANS_CLK_SEL_DISABLED); | ||
1186 | } | ||
1187 | |||
1188 | static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | ||
1189 | { | ||
1190 | struct drm_encoder *encoder = &intel_encoder->base; | ||
1191 | struct drm_crtc *crtc = encoder->crtc; | ||
1192 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||
1193 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1194 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | ||
1195 | int type = intel_encoder->type; | ||
1196 | |||
1197 | if (type == INTEL_OUTPUT_EDP) { | ||
1198 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1199 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1200 | ironlake_edp_panel_on(intel_dp); | ||
1201 | ironlake_edp_panel_vdd_off(intel_dp, true); | ||
1202 | } | ||
1203 | |||
1204 | WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); | ||
1205 | I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel); | ||
1206 | |||
1207 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { | ||
1208 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1209 | |||
1210 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | ||
1211 | intel_dp_start_link_train(intel_dp); | ||
1212 | intel_dp_complete_link_train(intel_dp); | ||
1213 | } | ||
1214 | } | ||
1215 | |||
1216 | static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, | ||
1217 | enum port port) | ||
1218 | { | ||
1219 | uint32_t reg = DDI_BUF_CTL(port); | ||
1220 | int i; | ||
1221 | |||
1222 | for (i = 0; i < 8; i++) { | ||
1223 | udelay(1); | ||
1224 | if (I915_READ(reg) & DDI_BUF_IS_IDLE) | ||
1225 | return; | ||
1226 | } | ||
1227 | DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); | ||
1228 | } | ||
1229 | |||
1230 | static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | ||
1231 | { | ||
1232 | struct drm_encoder *encoder = &intel_encoder->base; | ||
1233 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||
1234 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | ||
1235 | int type = intel_encoder->type; | ||
1236 | uint32_t val; | ||
1237 | bool wait = false; | ||
1238 | |||
1239 | val = I915_READ(DDI_BUF_CTL(port)); | ||
1240 | if (val & DDI_BUF_CTL_ENABLE) { | ||
1241 | val &= ~DDI_BUF_CTL_ENABLE; | ||
1242 | I915_WRITE(DDI_BUF_CTL(port), val); | ||
1243 | wait = true; | ||
1244 | } | ||
1245 | |||
1246 | val = I915_READ(DP_TP_CTL(port)); | ||
1247 | val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); | ||
1248 | val |= DP_TP_CTL_LINK_TRAIN_PAT1; | ||
1249 | I915_WRITE(DP_TP_CTL(port), val); | ||
1250 | |||
1251 | if (wait) | ||
1252 | intel_wait_ddi_buf_idle(dev_priv, port); | ||
1253 | |||
1254 | if (type == INTEL_OUTPUT_EDP) { | ||
1255 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1256 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1257 | ironlake_edp_panel_off(intel_dp); | ||
1258 | } | ||
1259 | |||
1260 | I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); | ||
1261 | } | ||
1262 | |||
1263 | static void intel_enable_ddi(struct intel_encoder *intel_encoder) | ||
1264 | { | ||
1265 | struct drm_encoder *encoder = &intel_encoder->base; | ||
1266 | struct drm_device *dev = encoder->dev; | ||
1267 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1268 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | ||
1269 | int type = intel_encoder->type; | ||
1270 | |||
1271 | if (type == INTEL_OUTPUT_HDMI) { | ||
1272 | /* In HDMI/DVI mode, the port width, and swing/emphasis values | ||
1273 | * are ignored so nothing special needs to be done besides | ||
1274 | * enabling the port. | ||
1275 | */ | ||
1276 | I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE); | ||
1277 | } else if (type == INTEL_OUTPUT_EDP) { | ||
1278 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1279 | |||
1280 | ironlake_edp_backlight_on(intel_dp); | ||
1281 | } | ||
1282 | } | ||
1283 | |||
1284 | static void intel_disable_ddi(struct intel_encoder *intel_encoder) | ||
1285 | { | ||
1286 | struct drm_encoder *encoder = &intel_encoder->base; | ||
1287 | int type = intel_encoder->type; | ||
1288 | |||
1289 | if (type == INTEL_OUTPUT_EDP) { | ||
1290 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1291 | |||
1292 | ironlake_edp_backlight_off(intel_dp); | ||
1293 | } | ||
1294 | } | ||
1295 | |||
1296 | int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) | ||
1297 | { | ||
1298 | if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) | ||
1299 | return 450; | ||
1300 | else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == | ||
1301 | LCPLL_CLK_FREQ_450) | ||
1302 | return 450; | ||
1303 | else | ||
1304 | return 540; | ||
1305 | } | ||
1306 | |||
1307 | void intel_ddi_pll_init(struct drm_device *dev) | ||
808 | { | 1308 | { |
809 | struct drm_device *dev = encoder->base.dev; | ||
810 | struct drm_i915_private *dev_priv = dev->dev_private; | 1309 | struct drm_i915_private *dev_priv = dev->dev_private; |
811 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); | 1310 | uint32_t val = I915_READ(LCPLL_CTL); |
812 | int port = intel_hdmi->ddi_port; | 1311 | |
813 | u32 temp; | 1312 | /* The LCPLL register should be turned on by the BIOS. For now let's |
1313 | * just check its state and print errors in case something is wrong. | ||
1314 | * Don't even try to turn it on. | ||
1315 | */ | ||
1316 | |||
1317 | DRM_DEBUG_KMS("CDCLK running at %dMHz\n", | ||
1318 | intel_ddi_get_cdclk_freq(dev_priv)); | ||
1319 | |||
1320 | if (val & LCPLL_CD_SOURCE_FCLK) | ||
1321 | DRM_ERROR("CDCLK source is not LCPLL\n"); | ||
1322 | |||
1323 | if (val & LCPLL_PLL_DISABLE) | ||
1324 | DRM_ERROR("LCPLL is disabled\n"); | ||
1325 | } | ||
1326 | |||
1327 | void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) | ||
1328 | { | ||
1329 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | ||
1330 | struct intel_dp *intel_dp = &intel_dig_port->dp; | ||
1331 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | ||
1332 | enum port port = intel_dig_port->port; | ||
1333 | bool wait; | ||
1334 | uint32_t val; | ||
1335 | |||
1336 | if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { | ||
1337 | val = I915_READ(DDI_BUF_CTL(port)); | ||
1338 | if (val & DDI_BUF_CTL_ENABLE) { | ||
1339 | val &= ~DDI_BUF_CTL_ENABLE; | ||
1340 | I915_WRITE(DDI_BUF_CTL(port), val); | ||
1341 | wait = true; | ||
1342 | } | ||
1343 | |||
1344 | val = I915_READ(DP_TP_CTL(port)); | ||
1345 | val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); | ||
1346 | val |= DP_TP_CTL_LINK_TRAIN_PAT1; | ||
1347 | I915_WRITE(DP_TP_CTL(port), val); | ||
1348 | POSTING_READ(DP_TP_CTL(port)); | ||
1349 | |||
1350 | if (wait) | ||
1351 | intel_wait_ddi_buf_idle(dev_priv, port); | ||
1352 | } | ||
1353 | |||
1354 | val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | | ||
1355 | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; | ||
1356 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) | ||
1357 | val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; | ||
1358 | I915_WRITE(DP_TP_CTL(port), val); | ||
1359 | POSTING_READ(DP_TP_CTL(port)); | ||
1360 | |||
1361 | intel_dp->DP |= DDI_BUF_CTL_ENABLE; | ||
1362 | I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); | ||
1363 | POSTING_READ(DDI_BUF_CTL(port)); | ||
1364 | |||
1365 | udelay(600); | ||
1366 | } | ||
1367 | |||
1368 | void intel_ddi_fdi_disable(struct drm_crtc *crtc) | ||
1369 | { | ||
1370 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | ||
1371 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); | ||
1372 | uint32_t val; | ||
1373 | |||
1374 | intel_ddi_post_disable(intel_encoder); | ||
1375 | |||
1376 | val = I915_READ(_FDI_RXA_CTL); | ||
1377 | val &= ~FDI_RX_ENABLE; | ||
1378 | I915_WRITE(_FDI_RXA_CTL, val); | ||
1379 | |||
1380 | val = I915_READ(_FDI_RXA_MISC); | ||
1381 | val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); | ||
1382 | val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); | ||
1383 | I915_WRITE(_FDI_RXA_MISC, val); | ||
1384 | |||
1385 | val = I915_READ(_FDI_RXA_CTL); | ||
1386 | val &= ~FDI_PCDCLK; | ||
1387 | I915_WRITE(_FDI_RXA_CTL, val); | ||
1388 | |||
1389 | val = I915_READ(_FDI_RXA_CTL); | ||
1390 | val &= ~FDI_RX_PLL_ENABLE; | ||
1391 | I915_WRITE(_FDI_RXA_CTL, val); | ||
1392 | } | ||
1393 | |||
1394 | static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) | ||
1395 | { | ||
1396 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); | ||
1397 | int type = intel_encoder->type; | ||
1398 | |||
1399 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) | ||
1400 | intel_dp_check_link_status(intel_dp); | ||
1401 | } | ||
1402 | |||
1403 | static void intel_ddi_destroy(struct drm_encoder *encoder) | ||
1404 | { | ||
1405 | /* HDMI has nothing special to destroy, so we can go with this. */ | ||
1406 | intel_dp_encoder_destroy(encoder); | ||
1407 | } | ||
1408 | |||
1409 | static bool intel_ddi_mode_fixup(struct drm_encoder *encoder, | ||
1410 | const struct drm_display_mode *mode, | ||
1411 | struct drm_display_mode *adjusted_mode) | ||
1412 | { | ||
1413 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
1414 | int type = intel_encoder->type; | ||
1415 | |||
1416 | WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n"); | ||
1417 | |||
1418 | if (type == INTEL_OUTPUT_HDMI) | ||
1419 | return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode); | ||
1420 | else | ||
1421 | return intel_dp_mode_fixup(encoder, mode, adjusted_mode); | ||
1422 | } | ||
1423 | |||
1424 | static const struct drm_encoder_funcs intel_ddi_funcs = { | ||
1425 | .destroy = intel_ddi_destroy, | ||
1426 | }; | ||
1427 | |||
1428 | static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { | ||
1429 | .mode_fixup = intel_ddi_mode_fixup, | ||
1430 | .mode_set = intel_ddi_mode_set, | ||
1431 | .disable = intel_encoder_noop, | ||
1432 | }; | ||
1433 | |||
1434 | void intel_ddi_init(struct drm_device *dev, enum port port) | ||
1435 | { | ||
1436 | struct intel_digital_port *intel_dig_port; | ||
1437 | struct intel_encoder *intel_encoder; | ||
1438 | struct drm_encoder *encoder; | ||
1439 | struct intel_connector *hdmi_connector = NULL; | ||
1440 | struct intel_connector *dp_connector = NULL; | ||
1441 | |||
1442 | intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); | ||
1443 | if (!intel_dig_port) | ||
1444 | return; | ||
1445 | |||
1446 | dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | ||
1447 | if (!dp_connector) { | ||
1448 | kfree(intel_dig_port); | ||
1449 | return; | ||
1450 | } | ||
1451 | |||
1452 | if (port != PORT_A) { | ||
1453 | hdmi_connector = kzalloc(sizeof(struct intel_connector), | ||
1454 | GFP_KERNEL); | ||
1455 | if (!hdmi_connector) { | ||
1456 | kfree(dp_connector); | ||
1457 | kfree(intel_dig_port); | ||
1458 | return; | ||
1459 | } | ||
1460 | } | ||
1461 | |||
1462 | intel_encoder = &intel_dig_port->base; | ||
1463 | encoder = &intel_encoder->base; | ||
1464 | |||
1465 | drm_encoder_init(dev, encoder, &intel_ddi_funcs, | ||
1466 | DRM_MODE_ENCODER_TMDS); | ||
1467 | drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs); | ||
1468 | |||
1469 | intel_encoder->enable = intel_enable_ddi; | ||
1470 | intel_encoder->pre_enable = intel_ddi_pre_enable; | ||
1471 | intel_encoder->disable = intel_disable_ddi; | ||
1472 | intel_encoder->post_disable = intel_ddi_post_disable; | ||
1473 | intel_encoder->get_hw_state = intel_ddi_get_hw_state; | ||
1474 | |||
1475 | intel_dig_port->port = port; | ||
1476 | if (hdmi_connector) | ||
1477 | intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); | ||
1478 | else | ||
1479 | intel_dig_port->hdmi.sdvox_reg = 0; | ||
1480 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); | ||
814 | 1481 | ||
815 | temp = I915_READ(DDI_BUF_CTL(port)); | 1482 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; |
816 | temp &= ~DDI_BUF_CTL_ENABLE; | 1483 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
1484 | intel_encoder->cloneable = false; | ||
1485 | intel_encoder->hot_plug = intel_ddi_hot_plug; | ||
817 | 1486 | ||
818 | I915_WRITE(DDI_BUF_CTL(port), temp); | 1487 | if (hdmi_connector) |
1488 | intel_hdmi_init_connector(intel_dig_port, hdmi_connector); | ||
1489 | intel_dp_init_connector(intel_dig_port, dp_connector); | ||
819 | } | 1490 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 461a637f1ef7..6301d0cb45ee 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -41,8 +41,6 @@ | |||
41 | #include <drm/drm_crtc_helper.h> | 41 | #include <drm/drm_crtc_helper.h> |
42 | #include <linux/dma_remapping.h> | 42 | #include <linux/dma_remapping.h> |
43 | 43 | ||
44 | #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) | ||
45 | |||
46 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type); | 44 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type); |
47 | static void intel_increase_pllclock(struct drm_crtc *crtc); | 45 | static void intel_increase_pllclock(struct drm_crtc *crtc); |
48 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); | 46 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
@@ -80,6 +78,16 @@ struct intel_limit { | |||
80 | /* FDI */ | 78 | /* FDI */ |
81 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ | 79 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ |
82 | 80 | ||
81 | int | ||
82 | intel_pch_rawclk(struct drm_device *dev) | ||
83 | { | ||
84 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
85 | |||
86 | WARN_ON(!HAS_PCH_SPLIT(dev)); | ||
87 | |||
88 | return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; | ||
89 | } | ||
90 | |||
83 | static bool | 91 | static bool |
84 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 92 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
85 | int target, int refclk, intel_clock_t *match_clock, | 93 | int target, int refclk, intel_clock_t *match_clock, |
@@ -380,7 +388,7 @@ static const intel_limit_t intel_limits_vlv_dac = { | |||
380 | 388 | ||
381 | static const intel_limit_t intel_limits_vlv_hdmi = { | 389 | static const intel_limit_t intel_limits_vlv_hdmi = { |
382 | .dot = { .min = 20000, .max = 165000 }, | 390 | .dot = { .min = 20000, .max = 165000 }, |
383 | .vco = { .min = 5994000, .max = 4000000 }, | 391 | .vco = { .min = 4000000, .max = 5994000}, |
384 | .n = { .min = 1, .max = 7 }, | 392 | .n = { .min = 1, .max = 7 }, |
385 | .m = { .min = 60, .max = 300 }, /* guess */ | 393 | .m = { .min = 60, .max = 300 }, /* guess */ |
386 | .m1 = { .min = 2, .max = 3 }, | 394 | .m1 = { .min = 2, .max = 3 }, |
@@ -393,10 +401,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = { | |||
393 | }; | 401 | }; |
394 | 402 | ||
395 | static const intel_limit_t intel_limits_vlv_dp = { | 403 | static const intel_limit_t intel_limits_vlv_dp = { |
396 | .dot = { .min = 162000, .max = 270000 }, | 404 | .dot = { .min = 25000, .max = 270000 }, |
397 | .vco = { .min = 5994000, .max = 4000000 }, | 405 | .vco = { .min = 4000000, .max = 6000000 }, |
398 | .n = { .min = 1, .max = 7 }, | 406 | .n = { .min = 1, .max = 7 }, |
399 | .m = { .min = 60, .max = 300 }, /* guess */ | 407 | .m = { .min = 22, .max = 450 }, |
400 | .m1 = { .min = 2, .max = 3 }, | 408 | .m1 = { .min = 2, .max = 3 }, |
401 | .m2 = { .min = 11, .max = 156 }, | 409 | .m2 = { .min = 11, .max = 156 }, |
402 | .p = { .min = 10, .max = 30 }, | 410 | .p = { .min = 10, .max = 30 }, |
@@ -531,7 +539,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, | |||
531 | limit = &intel_limits_ironlake_single_lvds; | 539 | limit = &intel_limits_ironlake_single_lvds; |
532 | } | 540 | } |
533 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | 541 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
534 | HAS_eDP) | 542 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) |
535 | limit = &intel_limits_ironlake_display_port; | 543 | limit = &intel_limits_ironlake_display_port; |
536 | else | 544 | else |
537 | limit = &intel_limits_ironlake_dac; | 545 | limit = &intel_limits_ironlake_dac; |
@@ -927,6 +935,15 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
927 | return true; | 935 | return true; |
928 | } | 936 | } |
929 | 937 | ||
938 | enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, | ||
939 | enum pipe pipe) | ||
940 | { | ||
941 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
942 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
943 | |||
944 | return intel_crtc->cpu_transcoder; | ||
945 | } | ||
946 | |||
930 | static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) | 947 | static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) |
931 | { | 948 | { |
932 | struct drm_i915_private *dev_priv = dev->dev_private; | 949 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -999,9 +1016,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
999 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) | 1016 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
1000 | { | 1017 | { |
1001 | struct drm_i915_private *dev_priv = dev->dev_private; | 1018 | struct drm_i915_private *dev_priv = dev->dev_private; |
1019 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
1020 | pipe); | ||
1002 | 1021 | ||
1003 | if (INTEL_INFO(dev)->gen >= 4) { | 1022 | if (INTEL_INFO(dev)->gen >= 4) { |
1004 | int reg = PIPECONF(pipe); | 1023 | int reg = PIPECONF(cpu_transcoder); |
1005 | 1024 | ||
1006 | /* Wait for the Pipe State to go off */ | 1025 | /* Wait for the Pipe State to go off */ |
1007 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, | 1026 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, |
@@ -1103,12 +1122,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, | |||
1103 | int reg; | 1122 | int reg; |
1104 | u32 val; | 1123 | u32 val; |
1105 | bool cur_state; | 1124 | bool cur_state; |
1125 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
1126 | pipe); | ||
1106 | 1127 | ||
1107 | if (IS_HASWELL(dev_priv->dev)) { | 1128 | if (IS_HASWELL(dev_priv->dev)) { |
1108 | /* On Haswell, DDI is used instead of FDI_TX_CTL */ | 1129 | /* On Haswell, DDI is used instead of FDI_TX_CTL */ |
1109 | reg = DDI_FUNC_CTL(pipe); | 1130 | reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); |
1110 | val = I915_READ(reg); | 1131 | val = I915_READ(reg); |
1111 | cur_state = !!(val & PIPE_DDI_FUNC_ENABLE); | 1132 | cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); |
1112 | } else { | 1133 | } else { |
1113 | reg = FDI_TX_CTL(pipe); | 1134 | reg = FDI_TX_CTL(pipe); |
1114 | val = I915_READ(reg); | 1135 | val = I915_READ(reg); |
@@ -1212,12 +1233,14 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
1212 | int reg; | 1233 | int reg; |
1213 | u32 val; | 1234 | u32 val; |
1214 | bool cur_state; | 1235 | bool cur_state; |
1236 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
1237 | pipe); | ||
1215 | 1238 | ||
1216 | /* if we need the pipe A quirk it must be always on */ | 1239 | /* if we need the pipe A quirk it must be always on */ |
1217 | if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) | 1240 | if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) |
1218 | state = true; | 1241 | state = true; |
1219 | 1242 | ||
1220 | reg = PIPECONF(pipe); | 1243 | reg = PIPECONF(cpu_transcoder); |
1221 | val = I915_READ(reg); | 1244 | val = I915_READ(reg); |
1222 | cur_state = !!(val & PIPECONF_ENABLE); | 1245 | cur_state = !!(val & PIPECONF_ENABLE); |
1223 | WARN(cur_state != state, | 1246 | WARN(cur_state != state, |
@@ -1554,14 +1577,14 @@ out_unlock: | |||
1554 | } | 1577 | } |
1555 | 1578 | ||
1556 | /** | 1579 | /** |
1557 | * intel_enable_pch_pll - enable PCH PLL | 1580 | * ironlake_enable_pch_pll - enable PCH PLL |
1558 | * @dev_priv: i915 private structure | 1581 | * @dev_priv: i915 private structure |
1559 | * @pipe: pipe PLL to enable | 1582 | * @pipe: pipe PLL to enable |
1560 | * | 1583 | * |
1561 | * The PCH PLL needs to be enabled before the PCH transcoder, since it | 1584 | * The PCH PLL needs to be enabled before the PCH transcoder, since it |
1562 | * drives the transcoder clock. | 1585 | * drives the transcoder clock. |
1563 | */ | 1586 | */ |
1564 | static void intel_enable_pch_pll(struct intel_crtc *intel_crtc) | 1587 | static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) |
1565 | { | 1588 | { |
1566 | struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; | 1589 | struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; |
1567 | struct intel_pch_pll *pll; | 1590 | struct intel_pch_pll *pll; |
@@ -1645,12 +1668,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) | |||
1645 | pll->on = false; | 1668 | pll->on = false; |
1646 | } | 1669 | } |
1647 | 1670 | ||
1648 | static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | 1671 | static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
1649 | enum pipe pipe) | 1672 | enum pipe pipe) |
1650 | { | 1673 | { |
1651 | int reg; | 1674 | struct drm_device *dev = dev_priv->dev; |
1652 | u32 val, pipeconf_val; | ||
1653 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 1675 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
1676 | uint32_t reg, val, pipeconf_val; | ||
1654 | 1677 | ||
1655 | /* PCH only available on ILK+ */ | 1678 | /* PCH only available on ILK+ */ |
1656 | BUG_ON(dev_priv->info->gen < 5); | 1679 | BUG_ON(dev_priv->info->gen < 5); |
@@ -1664,10 +1687,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | |||
1664 | assert_fdi_tx_enabled(dev_priv, pipe); | 1687 | assert_fdi_tx_enabled(dev_priv, pipe); |
1665 | assert_fdi_rx_enabled(dev_priv, pipe); | 1688 | assert_fdi_rx_enabled(dev_priv, pipe); |
1666 | 1689 | ||
1667 | if (IS_HASWELL(dev_priv->dev) && pipe > 0) { | 1690 | if (HAS_PCH_CPT(dev)) { |
1668 | DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n"); | 1691 | /* Workaround: Set the timing override bit before enabling the |
1669 | return; | 1692 | * pch transcoder. */ |
1693 | reg = TRANS_CHICKEN2(pipe); | ||
1694 | val = I915_READ(reg); | ||
1695 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; | ||
1696 | I915_WRITE(reg, val); | ||
1670 | } | 1697 | } |
1698 | |||
1671 | reg = TRANSCONF(pipe); | 1699 | reg = TRANSCONF(pipe); |
1672 | val = I915_READ(reg); | 1700 | val = I915_READ(reg); |
1673 | pipeconf_val = I915_READ(PIPECONF(pipe)); | 1701 | pipeconf_val = I915_READ(PIPECONF(pipe)); |
@@ -1696,11 +1724,42 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv, | |||
1696 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | 1724 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
1697 | } | 1725 | } |
1698 | 1726 | ||
1699 | static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | 1727 | static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
1700 | enum pipe pipe) | 1728 | enum transcoder cpu_transcoder) |
1701 | { | 1729 | { |
1702 | int reg; | 1730 | u32 val, pipeconf_val; |
1703 | u32 val; | 1731 | |
1732 | /* PCH only available on ILK+ */ | ||
1733 | BUG_ON(dev_priv->info->gen < 5); | ||
1734 | |||
1735 | /* FDI must be feeding us bits for PCH ports */ | ||
1736 | assert_fdi_tx_enabled(dev_priv, cpu_transcoder); | ||
1737 | assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); | ||
1738 | |||
1739 | /* Workaround: set timing override bit. */ | ||
1740 | val = I915_READ(_TRANSA_CHICKEN2); | ||
1741 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; | ||
1742 | I915_WRITE(_TRANSA_CHICKEN2, val); | ||
1743 | |||
1744 | val = TRANS_ENABLE; | ||
1745 | pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); | ||
1746 | |||
1747 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == | ||
1748 | PIPECONF_INTERLACED_ILK) | ||
1749 | val |= TRANS_INTERLACED; | ||
1750 | else | ||
1751 | val |= TRANS_PROGRESSIVE; | ||
1752 | |||
1753 | I915_WRITE(TRANSCONF(TRANSCODER_A), val); | ||
1754 | if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) | ||
1755 | DRM_ERROR("Failed to enable PCH transcoder\n"); | ||
1756 | } | ||
1757 | |||
1758 | static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, | ||
1759 | enum pipe pipe) | ||
1760 | { | ||
1761 | struct drm_device *dev = dev_priv->dev; | ||
1762 | uint32_t reg, val; | ||
1704 | 1763 | ||
1705 | /* FDI relies on the transcoder */ | 1764 | /* FDI relies on the transcoder */ |
1706 | assert_fdi_tx_disabled(dev_priv, pipe); | 1765 | assert_fdi_tx_disabled(dev_priv, pipe); |
@@ -1716,6 +1775,31 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | |||
1716 | /* wait for PCH transcoder off, transcoder state */ | 1775 | /* wait for PCH transcoder off, transcoder state */ |
1717 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | 1776 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) |
1718 | DRM_ERROR("failed to disable transcoder %d\n", pipe); | 1777 | DRM_ERROR("failed to disable transcoder %d\n", pipe); |
1778 | |||
1779 | if (!HAS_PCH_IBX(dev)) { | ||
1780 | /* Workaround: Clear the timing override chicken bit again. */ | ||
1781 | reg = TRANS_CHICKEN2(pipe); | ||
1782 | val = I915_READ(reg); | ||
1783 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; | ||
1784 | I915_WRITE(reg, val); | ||
1785 | } | ||
1786 | } | ||
1787 | |||
1788 | static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) | ||
1789 | { | ||
1790 | u32 val; | ||
1791 | |||
1792 | val = I915_READ(_TRANSACONF); | ||
1793 | val &= ~TRANS_ENABLE; | ||
1794 | I915_WRITE(_TRANSACONF, val); | ||
1795 | /* wait for PCH transcoder off, transcoder state */ | ||
1796 | if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) | ||
1797 | DRM_ERROR("Failed to disable PCH transcoder\n"); | ||
1798 | |||
1799 | /* Workaround: clear timing override bit. */ | ||
1800 | val = I915_READ(_TRANSA_CHICKEN2); | ||
1801 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; | ||
1802 | I915_WRITE(_TRANSA_CHICKEN2, val); | ||
1719 | } | 1803 | } |
1720 | 1804 | ||
1721 | /** | 1805 | /** |
@@ -1735,6 +1819,8 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | |||
1735 | static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, | 1819 | static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, |
1736 | bool pch_port) | 1820 | bool pch_port) |
1737 | { | 1821 | { |
1822 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
1823 | pipe); | ||
1738 | int reg; | 1824 | int reg; |
1739 | u32 val; | 1825 | u32 val; |
1740 | 1826 | ||
@@ -1754,7 +1840,7 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, | |||
1754 | /* FIXME: assert CPU port conditions for SNB+ */ | 1840 | /* FIXME: assert CPU port conditions for SNB+ */ |
1755 | } | 1841 | } |
1756 | 1842 | ||
1757 | reg = PIPECONF(pipe); | 1843 | reg = PIPECONF(cpu_transcoder); |
1758 | val = I915_READ(reg); | 1844 | val = I915_READ(reg); |
1759 | if (val & PIPECONF_ENABLE) | 1845 | if (val & PIPECONF_ENABLE) |
1760 | return; | 1846 | return; |
@@ -1778,6 +1864,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, | |||
1778 | static void intel_disable_pipe(struct drm_i915_private *dev_priv, | 1864 | static void intel_disable_pipe(struct drm_i915_private *dev_priv, |
1779 | enum pipe pipe) | 1865 | enum pipe pipe) |
1780 | { | 1866 | { |
1867 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
1868 | pipe); | ||
1781 | int reg; | 1869 | int reg; |
1782 | u32 val; | 1870 | u32 val; |
1783 | 1871 | ||
@@ -1791,7 +1879,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, | |||
1791 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | 1879 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) |
1792 | return; | 1880 | return; |
1793 | 1881 | ||
1794 | reg = PIPECONF(pipe); | 1882 | reg = PIPECONF(cpu_transcoder); |
1795 | val = I915_READ(reg); | 1883 | val = I915_READ(reg); |
1796 | if ((val & PIPECONF_ENABLE) == 0) | 1884 | if ((val & PIPECONF_ENABLE) == 0) |
1797 | return; | 1885 | return; |
@@ -1807,8 +1895,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, | |||
1807 | void intel_flush_display_plane(struct drm_i915_private *dev_priv, | 1895 | void intel_flush_display_plane(struct drm_i915_private *dev_priv, |
1808 | enum plane plane) | 1896 | enum plane plane) |
1809 | { | 1897 | { |
1810 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); | 1898 | if (dev_priv->info->gen >= 4) |
1811 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); | 1899 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); |
1900 | else | ||
1901 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); | ||
1812 | } | 1902 | } |
1813 | 1903 | ||
1814 | /** | 1904 | /** |
@@ -1926,9 +2016,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) | |||
1926 | 2016 | ||
1927 | /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel | 2017 | /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel |
1928 | * is assumed to be a power-of-two. */ | 2018 | * is assumed to be a power-of-two. */ |
1929 | static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y, | 2019 | unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, |
1930 | unsigned int bpp, | 2020 | unsigned int bpp, |
1931 | unsigned int pitch) | 2021 | unsigned int pitch) |
1932 | { | 2022 | { |
1933 | int tile_rows, tiles; | 2023 | int tile_rows, tiles; |
1934 | 2024 | ||
@@ -1969,24 +2059,38 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1969 | dspcntr = I915_READ(reg); | 2059 | dspcntr = I915_READ(reg); |
1970 | /* Mask out pixel format bits in case we change it */ | 2060 | /* Mask out pixel format bits in case we change it */ |
1971 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | 2061 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
1972 | switch (fb->bits_per_pixel) { | 2062 | switch (fb->pixel_format) { |
1973 | case 8: | 2063 | case DRM_FORMAT_C8: |
1974 | dspcntr |= DISPPLANE_8BPP; | 2064 | dspcntr |= DISPPLANE_8BPP; |
1975 | break; | 2065 | break; |
1976 | case 16: | 2066 | case DRM_FORMAT_XRGB1555: |
1977 | if (fb->depth == 15) | 2067 | case DRM_FORMAT_ARGB1555: |
1978 | dspcntr |= DISPPLANE_15_16BPP; | 2068 | dspcntr |= DISPPLANE_BGRX555; |
1979 | else | ||
1980 | dspcntr |= DISPPLANE_16BPP; | ||
1981 | break; | 2069 | break; |
1982 | case 24: | 2070 | case DRM_FORMAT_RGB565: |
1983 | case 32: | 2071 | dspcntr |= DISPPLANE_BGRX565; |
1984 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | 2072 | break; |
2073 | case DRM_FORMAT_XRGB8888: | ||
2074 | case DRM_FORMAT_ARGB8888: | ||
2075 | dspcntr |= DISPPLANE_BGRX888; | ||
2076 | break; | ||
2077 | case DRM_FORMAT_XBGR8888: | ||
2078 | case DRM_FORMAT_ABGR8888: | ||
2079 | dspcntr |= DISPPLANE_RGBX888; | ||
2080 | break; | ||
2081 | case DRM_FORMAT_XRGB2101010: | ||
2082 | case DRM_FORMAT_ARGB2101010: | ||
2083 | dspcntr |= DISPPLANE_BGRX101010; | ||
2084 | break; | ||
2085 | case DRM_FORMAT_XBGR2101010: | ||
2086 | case DRM_FORMAT_ABGR2101010: | ||
2087 | dspcntr |= DISPPLANE_RGBX101010; | ||
1985 | break; | 2088 | break; |
1986 | default: | 2089 | default: |
1987 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); | 2090 | DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); |
1988 | return -EINVAL; | 2091 | return -EINVAL; |
1989 | } | 2092 | } |
2093 | |||
1990 | if (INTEL_INFO(dev)->gen >= 4) { | 2094 | if (INTEL_INFO(dev)->gen >= 4) { |
1991 | if (obj->tiling_mode != I915_TILING_NONE) | 2095 | if (obj->tiling_mode != I915_TILING_NONE) |
1992 | dspcntr |= DISPPLANE_TILED; | 2096 | dspcntr |= DISPPLANE_TILED; |
@@ -2000,9 +2104,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
2000 | 2104 | ||
2001 | if (INTEL_INFO(dev)->gen >= 4) { | 2105 | if (INTEL_INFO(dev)->gen >= 4) { |
2002 | intel_crtc->dspaddr_offset = | 2106 | intel_crtc->dspaddr_offset = |
2003 | gen4_compute_dspaddr_offset_xtiled(&x, &y, | 2107 | intel_gen4_compute_offset_xtiled(&x, &y, |
2004 | fb->bits_per_pixel / 8, | 2108 | fb->bits_per_pixel / 8, |
2005 | fb->pitches[0]); | 2109 | fb->pitches[0]); |
2006 | linear_offset -= intel_crtc->dspaddr_offset; | 2110 | linear_offset -= intel_crtc->dspaddr_offset; |
2007 | } else { | 2111 | } else { |
2008 | intel_crtc->dspaddr_offset = linear_offset; | 2112 | intel_crtc->dspaddr_offset = linear_offset; |
@@ -2053,27 +2157,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc, | |||
2053 | dspcntr = I915_READ(reg); | 2157 | dspcntr = I915_READ(reg); |
2054 | /* Mask out pixel format bits in case we change it */ | 2158 | /* Mask out pixel format bits in case we change it */ |
2055 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | 2159 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
2056 | switch (fb->bits_per_pixel) { | 2160 | switch (fb->pixel_format) { |
2057 | case 8: | 2161 | case DRM_FORMAT_C8: |
2058 | dspcntr |= DISPPLANE_8BPP; | 2162 | dspcntr |= DISPPLANE_8BPP; |
2059 | break; | 2163 | break; |
2060 | case 16: | 2164 | case DRM_FORMAT_RGB565: |
2061 | if (fb->depth != 16) | 2165 | dspcntr |= DISPPLANE_BGRX565; |
2062 | return -EINVAL; | ||
2063 | |||
2064 | dspcntr |= DISPPLANE_16BPP; | ||
2065 | break; | 2166 | break; |
2066 | case 24: | 2167 | case DRM_FORMAT_XRGB8888: |
2067 | case 32: | 2168 | case DRM_FORMAT_ARGB8888: |
2068 | if (fb->depth == 24) | 2169 | dspcntr |= DISPPLANE_BGRX888; |
2069 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | 2170 | break; |
2070 | else if (fb->depth == 30) | 2171 | case DRM_FORMAT_XBGR8888: |
2071 | dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; | 2172 | case DRM_FORMAT_ABGR8888: |
2072 | else | 2173 | dspcntr |= DISPPLANE_RGBX888; |
2073 | return -EINVAL; | 2174 | break; |
2175 | case DRM_FORMAT_XRGB2101010: | ||
2176 | case DRM_FORMAT_ARGB2101010: | ||
2177 | dspcntr |= DISPPLANE_BGRX101010; | ||
2178 | break; | ||
2179 | case DRM_FORMAT_XBGR2101010: | ||
2180 | case DRM_FORMAT_ABGR2101010: | ||
2181 | dspcntr |= DISPPLANE_RGBX101010; | ||
2074 | break; | 2182 | break; |
2075 | default: | 2183 | default: |
2076 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); | 2184 | DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); |
2077 | return -EINVAL; | 2185 | return -EINVAL; |
2078 | } | 2186 | } |
2079 | 2187 | ||
@@ -2089,9 +2197,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc, | |||
2089 | 2197 | ||
2090 | linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | 2198 | linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
2091 | intel_crtc->dspaddr_offset = | 2199 | intel_crtc->dspaddr_offset = |
2092 | gen4_compute_dspaddr_offset_xtiled(&x, &y, | 2200 | intel_gen4_compute_offset_xtiled(&x, &y, |
2093 | fb->bits_per_pixel / 8, | 2201 | fb->bits_per_pixel / 8, |
2094 | fb->pitches[0]); | 2202 | fb->pitches[0]); |
2095 | linear_offset -= intel_crtc->dspaddr_offset; | 2203 | linear_offset -= intel_crtc->dspaddr_offset; |
2096 | 2204 | ||
2097 | DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", | 2205 | DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", |
@@ -2099,8 +2207,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc, | |||
2099 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | 2207 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
2100 | I915_MODIFY_DISPBASE(DSPSURF(plane), | 2208 | I915_MODIFY_DISPBASE(DSPSURF(plane), |
2101 | obj->gtt_offset + intel_crtc->dspaddr_offset); | 2209 | obj->gtt_offset + intel_crtc->dspaddr_offset); |
2102 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | 2210 | if (IS_HASWELL(dev)) { |
2103 | I915_WRITE(DSPLINOFF(plane), linear_offset); | 2211 | I915_WRITE(DSPOFFSET(plane), (y << 16) | x); |
2212 | } else { | ||
2213 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | ||
2214 | I915_WRITE(DSPLINOFF(plane), linear_offset); | ||
2215 | } | ||
2104 | POSTING_READ(reg); | 2216 | POSTING_READ(reg); |
2105 | 2217 | ||
2106 | return 0; | 2218 | return 0; |
@@ -2148,13 +2260,39 @@ intel_finish_fb(struct drm_framebuffer *old_fb) | |||
2148 | return ret; | 2260 | return ret; |
2149 | } | 2261 | } |
2150 | 2262 | ||
2263 | static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y) | ||
2264 | { | ||
2265 | struct drm_device *dev = crtc->dev; | ||
2266 | struct drm_i915_master_private *master_priv; | ||
2267 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2268 | |||
2269 | if (!dev->primary->master) | ||
2270 | return; | ||
2271 | |||
2272 | master_priv = dev->primary->master->driver_priv; | ||
2273 | if (!master_priv->sarea_priv) | ||
2274 | return; | ||
2275 | |||
2276 | switch (intel_crtc->pipe) { | ||
2277 | case 0: | ||
2278 | master_priv->sarea_priv->pipeA_x = x; | ||
2279 | master_priv->sarea_priv->pipeA_y = y; | ||
2280 | break; | ||
2281 | case 1: | ||
2282 | master_priv->sarea_priv->pipeB_x = x; | ||
2283 | master_priv->sarea_priv->pipeB_y = y; | ||
2284 | break; | ||
2285 | default: | ||
2286 | break; | ||
2287 | } | ||
2288 | } | ||
2289 | |||
2151 | static int | 2290 | static int |
2152 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | 2291 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, |
2153 | struct drm_framebuffer *fb) | 2292 | struct drm_framebuffer *fb) |
2154 | { | 2293 | { |
2155 | struct drm_device *dev = crtc->dev; | 2294 | struct drm_device *dev = crtc->dev; |
2156 | struct drm_i915_private *dev_priv = dev->dev_private; | 2295 | struct drm_i915_private *dev_priv = dev->dev_private; |
2157 | struct drm_i915_master_private *master_priv; | ||
2158 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2296 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2159 | struct drm_framebuffer *old_fb; | 2297 | struct drm_framebuffer *old_fb; |
2160 | int ret; | 2298 | int ret; |
@@ -2206,20 +2344,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2206 | intel_update_fbc(dev); | 2344 | intel_update_fbc(dev); |
2207 | mutex_unlock(&dev->struct_mutex); | 2345 | mutex_unlock(&dev->struct_mutex); |
2208 | 2346 | ||
2209 | if (!dev->primary->master) | 2347 | intel_crtc_update_sarea_pos(crtc, x, y); |
2210 | return 0; | ||
2211 | |||
2212 | master_priv = dev->primary->master->driver_priv; | ||
2213 | if (!master_priv->sarea_priv) | ||
2214 | return 0; | ||
2215 | |||
2216 | if (intel_crtc->pipe) { | ||
2217 | master_priv->sarea_priv->pipeB_x = x; | ||
2218 | master_priv->sarea_priv->pipeB_y = y; | ||
2219 | } else { | ||
2220 | master_priv->sarea_priv->pipeA_x = x; | ||
2221 | master_priv->sarea_priv->pipeA_y = y; | ||
2222 | } | ||
2223 | 2348 | ||
2224 | return 0; | 2349 | return 0; |
2225 | } | 2350 | } |
@@ -2314,6 +2439,29 @@ static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) | |||
2314 | POSTING_READ(SOUTH_CHICKEN1); | 2439 | POSTING_READ(SOUTH_CHICKEN1); |
2315 | } | 2440 | } |
2316 | 2441 | ||
2442 | static void ivb_modeset_global_resources(struct drm_device *dev) | ||
2443 | { | ||
2444 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2445 | struct intel_crtc *pipe_B_crtc = | ||
2446 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); | ||
2447 | struct intel_crtc *pipe_C_crtc = | ||
2448 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); | ||
2449 | uint32_t temp; | ||
2450 | |||
2451 | /* When everything is off disable fdi C so that we could enable fdi B | ||
2452 | * with all lanes. XXX: This misses the case where a pipe is not using | ||
2453 | * any pch resources and so doesn't need any fdi lanes. */ | ||
2454 | if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { | ||
2455 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); | ||
2456 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); | ||
2457 | |||
2458 | temp = I915_READ(SOUTH_CHICKEN1); | ||
2459 | temp &= ~FDI_BC_BIFURCATION_SELECT; | ||
2460 | DRM_DEBUG_KMS("disabling fdi C rx\n"); | ||
2461 | I915_WRITE(SOUTH_CHICKEN1, temp); | ||
2462 | } | ||
2463 | } | ||
2464 | |||
2317 | /* The FDI link training functions for ILK/Ibexpeak. */ | 2465 | /* The FDI link training functions for ILK/Ibexpeak. */ |
2318 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | 2466 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
2319 | { | 2467 | { |
@@ -2357,11 +2505,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
2357 | udelay(150); | 2505 | udelay(150); |
2358 | 2506 | ||
2359 | /* Ironlake workaround, enable clock pointer after FDI enable*/ | 2507 | /* Ironlake workaround, enable clock pointer after FDI enable*/ |
2360 | if (HAS_PCH_IBX(dev)) { | 2508 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
2361 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | 2509 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | |
2362 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | | 2510 | FDI_RX_PHASE_SYNC_POINTER_EN); |
2363 | FDI_RX_PHASE_SYNC_POINTER_EN); | ||
2364 | } | ||
2365 | 2511 | ||
2366 | reg = FDI_RX_IIR(pipe); | 2512 | reg = FDI_RX_IIR(pipe); |
2367 | for (tries = 0; tries < 5; tries++) { | 2513 | for (tries = 0; tries < 5; tries++) { |
@@ -2450,6 +2596,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
2450 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | 2596 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2451 | I915_WRITE(reg, temp | FDI_TX_ENABLE); | 2597 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2452 | 2598 | ||
2599 | I915_WRITE(FDI_RX_MISC(pipe), | ||
2600 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); | ||
2601 | |||
2453 | reg = FDI_RX_CTL(pipe); | 2602 | reg = FDI_RX_CTL(pipe); |
2454 | temp = I915_READ(reg); | 2603 | temp = I915_READ(reg); |
2455 | if (HAS_PCH_CPT(dev)) { | 2604 | if (HAS_PCH_CPT(dev)) { |
@@ -2464,8 +2613,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) | |||
2464 | POSTING_READ(reg); | 2613 | POSTING_READ(reg); |
2465 | udelay(150); | 2614 | udelay(150); |
2466 | 2615 | ||
2467 | if (HAS_PCH_CPT(dev)) | 2616 | cpt_phase_pointer_enable(dev, pipe); |
2468 | cpt_phase_pointer_enable(dev, pipe); | ||
2469 | 2617 | ||
2470 | for (i = 0; i < 4; i++) { | 2618 | for (i = 0; i < 4; i++) { |
2471 | reg = FDI_TX_CTL(pipe); | 2619 | reg = FDI_TX_CTL(pipe); |
@@ -2570,6 +2718,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2570 | POSTING_READ(reg); | 2718 | POSTING_READ(reg); |
2571 | udelay(150); | 2719 | udelay(150); |
2572 | 2720 | ||
2721 | DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", | ||
2722 | I915_READ(FDI_RX_IIR(pipe))); | ||
2723 | |||
2573 | /* enable CPU FDI TX and PCH FDI RX */ | 2724 | /* enable CPU FDI TX and PCH FDI RX */ |
2574 | reg = FDI_TX_CTL(pipe); | 2725 | reg = FDI_TX_CTL(pipe); |
2575 | temp = I915_READ(reg); | 2726 | temp = I915_READ(reg); |
@@ -2582,6 +2733,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2582 | temp |= FDI_COMPOSITE_SYNC; | 2733 | temp |= FDI_COMPOSITE_SYNC; |
2583 | I915_WRITE(reg, temp | FDI_TX_ENABLE); | 2734 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2584 | 2735 | ||
2736 | I915_WRITE(FDI_RX_MISC(pipe), | ||
2737 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); | ||
2738 | |||
2585 | reg = FDI_RX_CTL(pipe); | 2739 | reg = FDI_RX_CTL(pipe); |
2586 | temp = I915_READ(reg); | 2740 | temp = I915_READ(reg); |
2587 | temp &= ~FDI_LINK_TRAIN_AUTO; | 2741 | temp &= ~FDI_LINK_TRAIN_AUTO; |
@@ -2593,8 +2747,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2593 | POSTING_READ(reg); | 2747 | POSTING_READ(reg); |
2594 | udelay(150); | 2748 | udelay(150); |
2595 | 2749 | ||
2596 | if (HAS_PCH_CPT(dev)) | 2750 | cpt_phase_pointer_enable(dev, pipe); |
2597 | cpt_phase_pointer_enable(dev, pipe); | ||
2598 | 2751 | ||
2599 | for (i = 0; i < 4; i++) { | 2752 | for (i = 0; i < 4; i++) { |
2600 | reg = FDI_TX_CTL(pipe); | 2753 | reg = FDI_TX_CTL(pipe); |
@@ -2613,7 +2766,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2613 | if (temp & FDI_RX_BIT_LOCK || | 2766 | if (temp & FDI_RX_BIT_LOCK || |
2614 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { | 2767 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
2615 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); | 2768 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2616 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | 2769 | DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); |
2617 | break; | 2770 | break; |
2618 | } | 2771 | } |
2619 | } | 2772 | } |
@@ -2654,7 +2807,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2654 | 2807 | ||
2655 | if (temp & FDI_RX_SYMBOL_LOCK) { | 2808 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2656 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); | 2809 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2657 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | 2810 | DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); |
2658 | break; | 2811 | break; |
2659 | } | 2812 | } |
2660 | } | 2813 | } |
@@ -2671,9 +2824,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) | |||
2671 | int pipe = intel_crtc->pipe; | 2824 | int pipe = intel_crtc->pipe; |
2672 | u32 reg, temp; | 2825 | u32 reg, temp; |
2673 | 2826 | ||
2674 | /* Write the TU size bits so error detection works */ | ||
2675 | I915_WRITE(FDI_RX_TUSIZE1(pipe), | ||
2676 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); | ||
2677 | 2827 | ||
2678 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 2828 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
2679 | reg = FDI_RX_CTL(pipe); | 2829 | reg = FDI_RX_CTL(pipe); |
@@ -2839,7 +2989,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | |||
2839 | mutex_unlock(&dev->struct_mutex); | 2989 | mutex_unlock(&dev->struct_mutex); |
2840 | } | 2990 | } |
2841 | 2991 | ||
2842 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | 2992 | static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) |
2843 | { | 2993 | { |
2844 | struct drm_device *dev = crtc->dev; | 2994 | struct drm_device *dev = crtc->dev; |
2845 | struct intel_encoder *intel_encoder; | 2995 | struct intel_encoder *intel_encoder; |
@@ -2849,23 +2999,6 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | |||
2849 | * must be driven by its own crtc; no sharing is possible. | 2999 | * must be driven by its own crtc; no sharing is possible. |
2850 | */ | 3000 | */ |
2851 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | 3001 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
2852 | |||
2853 | /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell | ||
2854 | * CPU handles all others */ | ||
2855 | if (IS_HASWELL(dev)) { | ||
2856 | /* It is still unclear how this will work on PPT, so throw up a warning */ | ||
2857 | WARN_ON(!HAS_PCH_LPT(dev)); | ||
2858 | |||
2859 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { | ||
2860 | DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); | ||
2861 | return true; | ||
2862 | } else { | ||
2863 | DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", | ||
2864 | intel_encoder->type); | ||
2865 | return false; | ||
2866 | } | ||
2867 | } | ||
2868 | |||
2869 | switch (intel_encoder->type) { | 3002 | switch (intel_encoder->type) { |
2870 | case INTEL_OUTPUT_EDP: | 3003 | case INTEL_OUTPUT_EDP: |
2871 | if (!intel_encoder_is_pch_edp(&intel_encoder->base)) | 3004 | if (!intel_encoder_is_pch_edp(&intel_encoder->base)) |
@@ -2877,6 +3010,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | |||
2877 | return true; | 3010 | return true; |
2878 | } | 3011 | } |
2879 | 3012 | ||
3013 | static bool haswell_crtc_driving_pch(struct drm_crtc *crtc) | ||
3014 | { | ||
3015 | return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG); | ||
3016 | } | ||
3017 | |||
2880 | /* Program iCLKIP clock to the desired frequency */ | 3018 | /* Program iCLKIP clock to the desired frequency */ |
2881 | static void lpt_program_iclkip(struct drm_crtc *crtc) | 3019 | static void lpt_program_iclkip(struct drm_crtc *crtc) |
2882 | { | 3020 | { |
@@ -2986,15 +3124,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2986 | 3124 | ||
2987 | assert_transcoder_disabled(dev_priv, pipe); | 3125 | assert_transcoder_disabled(dev_priv, pipe); |
2988 | 3126 | ||
3127 | /* Write the TU size bits before fdi link training, so that error | ||
3128 | * detection works. */ | ||
3129 | I915_WRITE(FDI_RX_TUSIZE1(pipe), | ||
3130 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); | ||
3131 | |||
2989 | /* For PCH output, training FDI link */ | 3132 | /* For PCH output, training FDI link */ |
2990 | dev_priv->display.fdi_link_train(crtc); | 3133 | dev_priv->display.fdi_link_train(crtc); |
2991 | 3134 | ||
2992 | intel_enable_pch_pll(intel_crtc); | 3135 | /* XXX: pch pll's can be enabled any time before we enable the PCH |
3136 | * transcoder, and we actually should do this to not upset any PCH | ||
3137 | * transcoder that already use the clock when we share it. | ||
3138 | * | ||
3139 | * Note that enable_pch_pll tries to do the right thing, but get_pch_pll | ||
3140 | * unconditionally resets the pll - we need that to have the right LVDS | ||
3141 | * enable sequence. */ | ||
3142 | ironlake_enable_pch_pll(intel_crtc); | ||
2993 | 3143 | ||
2994 | if (HAS_PCH_LPT(dev)) { | 3144 | if (HAS_PCH_CPT(dev)) { |
2995 | DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n"); | ||
2996 | lpt_program_iclkip(crtc); | ||
2997 | } else if (HAS_PCH_CPT(dev)) { | ||
2998 | u32 sel; | 3145 | u32 sel; |
2999 | 3146 | ||
3000 | temp = I915_READ(PCH_DPLL_SEL); | 3147 | temp = I915_READ(PCH_DPLL_SEL); |
@@ -3031,8 +3178,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
3031 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | 3178 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
3032 | I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); | 3179 | I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); |
3033 | 3180 | ||
3034 | if (!IS_HASWELL(dev)) | 3181 | intel_fdi_normal_train(crtc); |
3035 | intel_fdi_normal_train(crtc); | ||
3036 | 3182 | ||
3037 | /* For PCH DP, enable TRANS_DP_CTL */ | 3183 | /* For PCH DP, enable TRANS_DP_CTL */ |
3038 | if (HAS_PCH_CPT(dev) && | 3184 | if (HAS_PCH_CPT(dev) && |
@@ -3064,15 +3210,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
3064 | temp |= TRANS_DP_PORT_SEL_D; | 3210 | temp |= TRANS_DP_PORT_SEL_D; |
3065 | break; | 3211 | break; |
3066 | default: | 3212 | default: |
3067 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); | 3213 | BUG(); |
3068 | temp |= TRANS_DP_PORT_SEL_B; | ||
3069 | break; | ||
3070 | } | 3214 | } |
3071 | 3215 | ||
3072 | I915_WRITE(reg, temp); | 3216 | I915_WRITE(reg, temp); |
3073 | } | 3217 | } |
3074 | 3218 | ||
3075 | intel_enable_transcoder(dev_priv, pipe); | 3219 | ironlake_enable_pch_transcoder(dev_priv, pipe); |
3220 | } | ||
3221 | |||
3222 | static void lpt_pch_enable(struct drm_crtc *crtc) | ||
3223 | { | ||
3224 | struct drm_device *dev = crtc->dev; | ||
3225 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3226 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3227 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
3228 | |||
3229 | assert_transcoder_disabled(dev_priv, TRANSCODER_A); | ||
3230 | |||
3231 | lpt_program_iclkip(crtc); | ||
3232 | |||
3233 | /* Set transcoder timing. */ | ||
3234 | I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder))); | ||
3235 | I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder))); | ||
3236 | I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder))); | ||
3237 | |||
3238 | I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder))); | ||
3239 | I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder))); | ||
3240 | I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder))); | ||
3241 | I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder))); | ||
3242 | |||
3243 | lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); | ||
3076 | } | 3244 | } |
3077 | 3245 | ||
3078 | static void intel_put_pch_pll(struct intel_crtc *intel_crtc) | 3246 | static void intel_put_pch_pll(struct intel_crtc *intel_crtc) |
@@ -3165,16 +3333,12 @@ prepare: /* separate function? */ | |||
3165 | void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) | 3333 | void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) |
3166 | { | 3334 | { |
3167 | struct drm_i915_private *dev_priv = dev->dev_private; | 3335 | struct drm_i915_private *dev_priv = dev->dev_private; |
3168 | int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); | 3336 | int dslreg = PIPEDSL(pipe); |
3169 | u32 temp; | 3337 | u32 temp; |
3170 | 3338 | ||
3171 | temp = I915_READ(dslreg); | 3339 | temp = I915_READ(dslreg); |
3172 | udelay(500); | 3340 | udelay(500); |
3173 | if (wait_for(I915_READ(dslreg) != temp, 5)) { | 3341 | if (wait_for(I915_READ(dslreg) != temp, 5)) { |
3174 | /* Without this, mode sets may fail silently on FDI */ | ||
3175 | I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); | ||
3176 | udelay(250); | ||
3177 | I915_WRITE(tc2reg, 0); | ||
3178 | if (wait_for(I915_READ(dslreg) != temp, 5)) | 3342 | if (wait_for(I915_READ(dslreg) != temp, 5)) |
3179 | DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); | 3343 | DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); |
3180 | } | 3344 | } |
@@ -3205,9 +3369,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
3205 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | 3369 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
3206 | } | 3370 | } |
3207 | 3371 | ||
3208 | is_pch_port = intel_crtc_driving_pch(crtc); | 3372 | is_pch_port = ironlake_crtc_driving_pch(crtc); |
3209 | 3373 | ||
3210 | if (is_pch_port) { | 3374 | if (is_pch_port) { |
3375 | /* Note: FDI PLL enabling _must_ be done before we enable the | ||
3376 | * cpu pipes, hence this is separate from all the other fdi/pch | ||
3377 | * enabling. */ | ||
3211 | ironlake_fdi_pll_enable(intel_crtc); | 3378 | ironlake_fdi_pll_enable(intel_crtc); |
3212 | } else { | 3379 | } else { |
3213 | assert_fdi_tx_disabled(dev_priv, pipe); | 3380 | assert_fdi_tx_disabled(dev_priv, pipe); |
@@ -3220,7 +3387,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
3220 | 3387 | ||
3221 | /* Enable panel fitting for LVDS */ | 3388 | /* Enable panel fitting for LVDS */ |
3222 | if (dev_priv->pch_pf_size && | 3389 | if (dev_priv->pch_pf_size && |
3223 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { | 3390 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || |
3391 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { | ||
3224 | /* Force use of hard-coded filter coefficients | 3392 | /* Force use of hard-coded filter coefficients |
3225 | * as some pre-programmed values are broken, | 3393 | * as some pre-programmed values are broken, |
3226 | * e.g. x201. | 3394 | * e.g. x201. |
@@ -3265,6 +3433,82 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
3265 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 3433 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
3266 | } | 3434 | } |
3267 | 3435 | ||
3436 | static void haswell_crtc_enable(struct drm_crtc *crtc) | ||
3437 | { | ||
3438 | struct drm_device *dev = crtc->dev; | ||
3439 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3440 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3441 | struct intel_encoder *encoder; | ||
3442 | int pipe = intel_crtc->pipe; | ||
3443 | int plane = intel_crtc->plane; | ||
3444 | bool is_pch_port; | ||
3445 | |||
3446 | WARN_ON(!crtc->enabled); | ||
3447 | |||
3448 | if (intel_crtc->active) | ||
3449 | return; | ||
3450 | |||
3451 | intel_crtc->active = true; | ||
3452 | intel_update_watermarks(dev); | ||
3453 | |||
3454 | is_pch_port = haswell_crtc_driving_pch(crtc); | ||
3455 | |||
3456 | if (is_pch_port) | ||
3457 | dev_priv->display.fdi_link_train(crtc); | ||
3458 | |||
3459 | for_each_encoder_on_crtc(dev, crtc, encoder) | ||
3460 | if (encoder->pre_enable) | ||
3461 | encoder->pre_enable(encoder); | ||
3462 | |||
3463 | intel_ddi_enable_pipe_clock(intel_crtc); | ||
3464 | |||
3465 | /* Enable panel fitting for eDP */ | ||
3466 | if (dev_priv->pch_pf_size && | ||
3467 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { | ||
3468 | /* Force use of hard-coded filter coefficients | ||
3469 | * as some pre-programmed values are broken, | ||
3470 | * e.g. x201. | ||
3471 | */ | ||
3472 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); | ||
3473 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); | ||
3474 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); | ||
3475 | } | ||
3476 | |||
3477 | /* | ||
3478 | * On ILK+ LUT must be loaded before the pipe is running but with | ||
3479 | * clocks enabled | ||
3480 | */ | ||
3481 | intel_crtc_load_lut(crtc); | ||
3482 | |||
3483 | intel_ddi_set_pipe_settings(crtc); | ||
3484 | intel_ddi_enable_pipe_func(crtc); | ||
3485 | |||
3486 | intel_enable_pipe(dev_priv, pipe, is_pch_port); | ||
3487 | intel_enable_plane(dev_priv, plane, pipe); | ||
3488 | |||
3489 | if (is_pch_port) | ||
3490 | lpt_pch_enable(crtc); | ||
3491 | |||
3492 | mutex_lock(&dev->struct_mutex); | ||
3493 | intel_update_fbc(dev); | ||
3494 | mutex_unlock(&dev->struct_mutex); | ||
3495 | |||
3496 | intel_crtc_update_cursor(crtc, true); | ||
3497 | |||
3498 | for_each_encoder_on_crtc(dev, crtc, encoder) | ||
3499 | encoder->enable(encoder); | ||
3500 | |||
3501 | /* | ||
3502 | * There seems to be a race in PCH platform hw (at least on some | ||
3503 | * outputs) where an enabled pipe still completes any pageflip right | ||
3504 | * away (as if the pipe is off) instead of waiting for vblank. As soon | ||
3505 | * as the first vblank happend, everything works as expected. Hence just | ||
3506 | * wait for one vblank before returning to avoid strange things | ||
3507 | * happening. | ||
3508 | */ | ||
3509 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
3510 | } | ||
3511 | |||
3268 | static void ironlake_crtc_disable(struct drm_crtc *crtc) | 3512 | static void ironlake_crtc_disable(struct drm_crtc *crtc) |
3269 | { | 3513 | { |
3270 | struct drm_device *dev = crtc->dev; | 3514 | struct drm_device *dev = crtc->dev; |
@@ -3303,7 +3547,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3303 | 3547 | ||
3304 | ironlake_fdi_disable(crtc); | 3548 | ironlake_fdi_disable(crtc); |
3305 | 3549 | ||
3306 | intel_disable_transcoder(dev_priv, pipe); | 3550 | ironlake_disable_pch_transcoder(dev_priv, pipe); |
3307 | 3551 | ||
3308 | if (HAS_PCH_CPT(dev)) { | 3552 | if (HAS_PCH_CPT(dev)) { |
3309 | /* disable TRANS_DP_CTL */ | 3553 | /* disable TRANS_DP_CTL */ |
@@ -3345,12 +3589,78 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3345 | mutex_unlock(&dev->struct_mutex); | 3589 | mutex_unlock(&dev->struct_mutex); |
3346 | } | 3590 | } |
3347 | 3591 | ||
3592 | static void haswell_crtc_disable(struct drm_crtc *crtc) | ||
3593 | { | ||
3594 | struct drm_device *dev = crtc->dev; | ||
3595 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3596 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3597 | struct intel_encoder *encoder; | ||
3598 | int pipe = intel_crtc->pipe; | ||
3599 | int plane = intel_crtc->plane; | ||
3600 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
3601 | bool is_pch_port; | ||
3602 | |||
3603 | if (!intel_crtc->active) | ||
3604 | return; | ||
3605 | |||
3606 | is_pch_port = haswell_crtc_driving_pch(crtc); | ||
3607 | |||
3608 | for_each_encoder_on_crtc(dev, crtc, encoder) | ||
3609 | encoder->disable(encoder); | ||
3610 | |||
3611 | intel_crtc_wait_for_pending_flips(crtc); | ||
3612 | drm_vblank_off(dev, pipe); | ||
3613 | intel_crtc_update_cursor(crtc, false); | ||
3614 | |||
3615 | intel_disable_plane(dev_priv, plane, pipe); | ||
3616 | |||
3617 | if (dev_priv->cfb_plane == plane) | ||
3618 | intel_disable_fbc(dev); | ||
3619 | |||
3620 | intel_disable_pipe(dev_priv, pipe); | ||
3621 | |||
3622 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); | ||
3623 | |||
3624 | /* Disable PF */ | ||
3625 | I915_WRITE(PF_CTL(pipe), 0); | ||
3626 | I915_WRITE(PF_WIN_SZ(pipe), 0); | ||
3627 | |||
3628 | intel_ddi_disable_pipe_clock(intel_crtc); | ||
3629 | |||
3630 | for_each_encoder_on_crtc(dev, crtc, encoder) | ||
3631 | if (encoder->post_disable) | ||
3632 | encoder->post_disable(encoder); | ||
3633 | |||
3634 | if (is_pch_port) { | ||
3635 | lpt_disable_pch_transcoder(dev_priv); | ||
3636 | intel_ddi_fdi_disable(crtc); | ||
3637 | } | ||
3638 | |||
3639 | intel_crtc->active = false; | ||
3640 | intel_update_watermarks(dev); | ||
3641 | |||
3642 | mutex_lock(&dev->struct_mutex); | ||
3643 | intel_update_fbc(dev); | ||
3644 | mutex_unlock(&dev->struct_mutex); | ||
3645 | } | ||
3646 | |||
3348 | static void ironlake_crtc_off(struct drm_crtc *crtc) | 3647 | static void ironlake_crtc_off(struct drm_crtc *crtc) |
3349 | { | 3648 | { |
3350 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3649 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3351 | intel_put_pch_pll(intel_crtc); | 3650 | intel_put_pch_pll(intel_crtc); |
3352 | } | 3651 | } |
3353 | 3652 | ||
3653 | static void haswell_crtc_off(struct drm_crtc *crtc) | ||
3654 | { | ||
3655 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3656 | |||
3657 | /* Stop saying we're using TRANSCODER_EDP because some other CRTC might | ||
3658 | * start using it. */ | ||
3659 | intel_crtc->cpu_transcoder = intel_crtc->pipe; | ||
3660 | |||
3661 | intel_ddi_put_crtc_pll(crtc); | ||
3662 | } | ||
3663 | |||
3354 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) | 3664 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
3355 | { | 3665 | { |
3356 | if (!enable && intel_crtc->overlay) { | 3666 | if (!enable && intel_crtc->overlay) { |
@@ -4050,7 +4360,7 @@ static void vlv_update_pll(struct drm_crtc *crtc, | |||
4050 | struct drm_display_mode *mode, | 4360 | struct drm_display_mode *mode, |
4051 | struct drm_display_mode *adjusted_mode, | 4361 | struct drm_display_mode *adjusted_mode, |
4052 | intel_clock_t *clock, intel_clock_t *reduced_clock, | 4362 | intel_clock_t *clock, intel_clock_t *reduced_clock, |
4053 | int refclk, int num_connectors) | 4363 | int num_connectors) |
4054 | { | 4364 | { |
4055 | struct drm_device *dev = crtc->dev; | 4365 | struct drm_device *dev = crtc->dev; |
4056 | struct drm_i915_private *dev_priv = dev->dev_private; | 4366 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -4058,9 +4368,19 @@ static void vlv_update_pll(struct drm_crtc *crtc, | |||
4058 | int pipe = intel_crtc->pipe; | 4368 | int pipe = intel_crtc->pipe; |
4059 | u32 dpll, mdiv, pdiv; | 4369 | u32 dpll, mdiv, pdiv; |
4060 | u32 bestn, bestm1, bestm2, bestp1, bestp2; | 4370 | u32 bestn, bestm1, bestm2, bestp1, bestp2; |
4061 | bool is_hdmi; | 4371 | bool is_sdvo; |
4372 | u32 temp; | ||
4062 | 4373 | ||
4063 | is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); | 4374 | is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || |
4375 | intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); | ||
4376 | |||
4377 | dpll = DPLL_VGA_MODE_DIS; | ||
4378 | dpll |= DPLL_EXT_BUFFER_ENABLE_VLV; | ||
4379 | dpll |= DPLL_REFA_CLK_ENABLE_VLV; | ||
4380 | dpll |= DPLL_INTEGRATED_CLOCK_VLV; | ||
4381 | |||
4382 | I915_WRITE(DPLL(pipe), dpll); | ||
4383 | POSTING_READ(DPLL(pipe)); | ||
4064 | 4384 | ||
4065 | bestn = clock->n; | 4385 | bestn = clock->n; |
4066 | bestm1 = clock->m1; | 4386 | bestm1 = clock->m1; |
@@ -4068,12 +4388,10 @@ static void vlv_update_pll(struct drm_crtc *crtc, | |||
4068 | bestp1 = clock->p1; | 4388 | bestp1 = clock->p1; |
4069 | bestp2 = clock->p2; | 4389 | bestp2 = clock->p2; |
4070 | 4390 | ||
4071 | /* Enable DPIO clock input */ | 4391 | /* |
4072 | dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | | 4392 | * In Valleyview PLL and program lane counter registers are exposed |
4073 | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; | 4393 | * through DPIO interface |
4074 | I915_WRITE(DPLL(pipe), dpll); | 4394 | */ |
4075 | POSTING_READ(DPLL(pipe)); | ||
4076 | |||
4077 | mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); | 4395 | mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); |
4078 | mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); | 4396 | mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); |
4079 | mdiv |= ((bestn << DPIO_N_SHIFT)); | 4397 | mdiv |= ((bestn << DPIO_N_SHIFT)); |
@@ -4084,12 +4402,13 @@ static void vlv_update_pll(struct drm_crtc *crtc, | |||
4084 | 4402 | ||
4085 | intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); | 4403 | intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); |
4086 | 4404 | ||
4087 | pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) | | 4405 | pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) | |
4088 | (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | | 4406 | (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | |
4089 | (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT); | 4407 | (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) | |
4408 | (5 << DPIO_CLK_BIAS_CTL_SHIFT); | ||
4090 | intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); | 4409 | intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); |
4091 | 4410 | ||
4092 | intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051); | 4411 | intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b); |
4093 | 4412 | ||
4094 | dpll |= DPLL_VCO_ENABLE; | 4413 | dpll |= DPLL_VCO_ENABLE; |
4095 | I915_WRITE(DPLL(pipe), dpll); | 4414 | I915_WRITE(DPLL(pipe), dpll); |
@@ -4097,19 +4416,44 @@ static void vlv_update_pll(struct drm_crtc *crtc, | |||
4097 | if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) | 4416 | if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) |
4098 | DRM_ERROR("DPLL %d failed to lock\n", pipe); | 4417 | DRM_ERROR("DPLL %d failed to lock\n", pipe); |
4099 | 4418 | ||
4100 | if (is_hdmi) { | 4419 | intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620); |
4101 | u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode); | 4420 | |
4421 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | ||
4422 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | ||
4423 | |||
4424 | I915_WRITE(DPLL(pipe), dpll); | ||
4425 | |||
4426 | /* Wait for the clocks to stabilize. */ | ||
4427 | POSTING_READ(DPLL(pipe)); | ||
4428 | udelay(150); | ||
4102 | 4429 | ||
4430 | temp = 0; | ||
4431 | if (is_sdvo) { | ||
4432 | temp = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
4103 | if (temp > 1) | 4433 | if (temp > 1) |
4104 | temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; | 4434 | temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; |
4105 | else | 4435 | else |
4106 | temp = 0; | 4436 | temp = 0; |
4107 | |||
4108 | I915_WRITE(DPLL_MD(pipe), temp); | ||
4109 | POSTING_READ(DPLL_MD(pipe)); | ||
4110 | } | 4437 | } |
4438 | I915_WRITE(DPLL_MD(pipe), temp); | ||
4439 | POSTING_READ(DPLL_MD(pipe)); | ||
4111 | 4440 | ||
4112 | intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */ | 4441 | /* Now program lane control registers */ |
4442 | if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) | ||
4443 | || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) | ||
4444 | { | ||
4445 | temp = 0x1000C4; | ||
4446 | if(pipe == 1) | ||
4447 | temp |= (1 << 21); | ||
4448 | intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp); | ||
4449 | } | ||
4450 | if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP)) | ||
4451 | { | ||
4452 | temp = 0x1000C4; | ||
4453 | if(pipe == 1) | ||
4454 | temp |= (1 << 21); | ||
4455 | intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); | ||
4456 | } | ||
4113 | } | 4457 | } |
4114 | 4458 | ||
4115 | static void i9xx_update_pll(struct drm_crtc *crtc, | 4459 | static void i9xx_update_pll(struct drm_crtc *crtc, |
@@ -4125,6 +4469,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc, | |||
4125 | u32 dpll; | 4469 | u32 dpll; |
4126 | bool is_sdvo; | 4470 | bool is_sdvo; |
4127 | 4471 | ||
4472 | i9xx_update_pll_dividers(crtc, clock, reduced_clock); | ||
4473 | |||
4128 | is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || | 4474 | is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || |
4129 | intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); | 4475 | intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); |
4130 | 4476 | ||
@@ -4225,7 +4571,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc, | |||
4225 | 4571 | ||
4226 | static void i8xx_update_pll(struct drm_crtc *crtc, | 4572 | static void i8xx_update_pll(struct drm_crtc *crtc, |
4227 | struct drm_display_mode *adjusted_mode, | 4573 | struct drm_display_mode *adjusted_mode, |
4228 | intel_clock_t *clock, | 4574 | intel_clock_t *clock, intel_clock_t *reduced_clock, |
4229 | int num_connectors) | 4575 | int num_connectors) |
4230 | { | 4576 | { |
4231 | struct drm_device *dev = crtc->dev; | 4577 | struct drm_device *dev = crtc->dev; |
@@ -4234,6 +4580,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc, | |||
4234 | int pipe = intel_crtc->pipe; | 4580 | int pipe = intel_crtc->pipe; |
4235 | u32 dpll; | 4581 | u32 dpll; |
4236 | 4582 | ||
4583 | i9xx_update_pll_dividers(crtc, clock, reduced_clock); | ||
4584 | |||
4237 | dpll = DPLL_VGA_MODE_DIS; | 4585 | dpll = DPLL_VGA_MODE_DIS; |
4238 | 4586 | ||
4239 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 4587 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
@@ -4283,6 +4631,64 @@ static void i8xx_update_pll(struct drm_crtc *crtc, | |||
4283 | I915_WRITE(DPLL(pipe), dpll); | 4631 | I915_WRITE(DPLL(pipe), dpll); |
4284 | } | 4632 | } |
4285 | 4633 | ||
4634 | static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, | ||
4635 | struct drm_display_mode *mode, | ||
4636 | struct drm_display_mode *adjusted_mode) | ||
4637 | { | ||
4638 | struct drm_device *dev = intel_crtc->base.dev; | ||
4639 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4640 | enum pipe pipe = intel_crtc->pipe; | ||
4641 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
4642 | uint32_t vsyncshift; | ||
4643 | |||
4644 | if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | ||
4645 | /* the chip adds 2 halflines automatically */ | ||
4646 | adjusted_mode->crtc_vtotal -= 1; | ||
4647 | adjusted_mode->crtc_vblank_end -= 1; | ||
4648 | vsyncshift = adjusted_mode->crtc_hsync_start | ||
4649 | - adjusted_mode->crtc_htotal / 2; | ||
4650 | } else { | ||
4651 | vsyncshift = 0; | ||
4652 | } | ||
4653 | |||
4654 | if (INTEL_INFO(dev)->gen > 3) | ||
4655 | I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); | ||
4656 | |||
4657 | I915_WRITE(HTOTAL(cpu_transcoder), | ||
4658 | (adjusted_mode->crtc_hdisplay - 1) | | ||
4659 | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
4660 | I915_WRITE(HBLANK(cpu_transcoder), | ||
4661 | (adjusted_mode->crtc_hblank_start - 1) | | ||
4662 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
4663 | I915_WRITE(HSYNC(cpu_transcoder), | ||
4664 | (adjusted_mode->crtc_hsync_start - 1) | | ||
4665 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
4666 | |||
4667 | I915_WRITE(VTOTAL(cpu_transcoder), | ||
4668 | (adjusted_mode->crtc_vdisplay - 1) | | ||
4669 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
4670 | I915_WRITE(VBLANK(cpu_transcoder), | ||
4671 | (adjusted_mode->crtc_vblank_start - 1) | | ||
4672 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
4673 | I915_WRITE(VSYNC(cpu_transcoder), | ||
4674 | (adjusted_mode->crtc_vsync_start - 1) | | ||
4675 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
4676 | |||
4677 | /* Workaround: when the EDP input selection is B, the VTOTAL_B must be | ||
4678 | * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is | ||
4679 | * documented on the DDI_FUNC_CTL register description, EDP Input Select | ||
4680 | * bits. */ | ||
4681 | if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && | ||
4682 | (pipe == PIPE_B || pipe == PIPE_C)) | ||
4683 | I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); | ||
4684 | |||
4685 | /* pipesrc controls the size that is scaled from, which should | ||
4686 | * always be the user's requested size. | ||
4687 | */ | ||
4688 | I915_WRITE(PIPESRC(pipe), | ||
4689 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
4690 | } | ||
4691 | |||
4286 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | 4692 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
4287 | struct drm_display_mode *mode, | 4693 | struct drm_display_mode *mode, |
4288 | struct drm_display_mode *adjusted_mode, | 4694 | struct drm_display_mode *adjusted_mode, |
@@ -4296,7 +4702,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
4296 | int plane = intel_crtc->plane; | 4702 | int plane = intel_crtc->plane; |
4297 | int refclk, num_connectors = 0; | 4703 | int refclk, num_connectors = 0; |
4298 | intel_clock_t clock, reduced_clock; | 4704 | intel_clock_t clock, reduced_clock; |
4299 | u32 dspcntr, pipeconf, vsyncshift; | 4705 | u32 dspcntr, pipeconf; |
4300 | bool ok, has_reduced_clock = false, is_sdvo = false; | 4706 | bool ok, has_reduced_clock = false, is_sdvo = false; |
4301 | bool is_lvds = false, is_tv = false, is_dp = false; | 4707 | bool is_lvds = false, is_tv = false, is_dp = false; |
4302 | struct intel_encoder *encoder; | 4708 | struct intel_encoder *encoder; |
@@ -4360,14 +4766,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
4360 | if (is_sdvo && is_tv) | 4766 | if (is_sdvo && is_tv) |
4361 | i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); | 4767 | i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); |
4362 | 4768 | ||
4363 | i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? | ||
4364 | &reduced_clock : NULL); | ||
4365 | |||
4366 | if (IS_GEN2(dev)) | 4769 | if (IS_GEN2(dev)) |
4367 | i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); | 4770 | i8xx_update_pll(crtc, adjusted_mode, &clock, |
4771 | has_reduced_clock ? &reduced_clock : NULL, | ||
4772 | num_connectors); | ||
4368 | else if (IS_VALLEYVIEW(dev)) | 4773 | else if (IS_VALLEYVIEW(dev)) |
4369 | vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL, | 4774 | vlv_update_pll(crtc, mode, adjusted_mode, &clock, |
4370 | refclk, num_connectors); | 4775 | has_reduced_clock ? &reduced_clock : NULL, |
4776 | num_connectors); | ||
4371 | else | 4777 | else |
4372 | i9xx_update_pll(crtc, mode, adjusted_mode, &clock, | 4778 | i9xx_update_pll(crtc, mode, adjusted_mode, &clock, |
4373 | has_reduced_clock ? &reduced_clock : NULL, | 4779 | has_reduced_clock ? &reduced_clock : NULL, |
@@ -4408,6 +4814,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
4408 | } | 4814 | } |
4409 | } | 4815 | } |
4410 | 4816 | ||
4817 | if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { | ||
4818 | if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { | ||
4819 | pipeconf |= PIPECONF_BPP_6 | | ||
4820 | PIPECONF_ENABLE | | ||
4821 | I965_PIPECONF_ACTIVE; | ||
4822 | } | ||
4823 | } | ||
4824 | |||
4411 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | 4825 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
4412 | drm_mode_debug_printmodeline(mode); | 4826 | drm_mode_debug_printmodeline(mode); |
4413 | 4827 | ||
@@ -4423,40 +4837,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
4423 | 4837 | ||
4424 | pipeconf &= ~PIPECONF_INTERLACE_MASK; | 4838 | pipeconf &= ~PIPECONF_INTERLACE_MASK; |
4425 | if (!IS_GEN2(dev) && | 4839 | if (!IS_GEN2(dev) && |
4426 | adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | 4840 | adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
4427 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | 4841 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
4428 | /* the chip adds 2 halflines automatically */ | 4842 | else |
4429 | adjusted_mode->crtc_vtotal -= 1; | ||
4430 | adjusted_mode->crtc_vblank_end -= 1; | ||
4431 | vsyncshift = adjusted_mode->crtc_hsync_start | ||
4432 | - adjusted_mode->crtc_htotal/2; | ||
4433 | } else { | ||
4434 | pipeconf |= PIPECONF_PROGRESSIVE; | 4843 | pipeconf |= PIPECONF_PROGRESSIVE; |
4435 | vsyncshift = 0; | ||
4436 | } | ||
4437 | 4844 | ||
4438 | if (!IS_GEN3(dev)) | 4845 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
4439 | I915_WRITE(VSYNCSHIFT(pipe), vsyncshift); | ||
4440 | |||
4441 | I915_WRITE(HTOTAL(pipe), | ||
4442 | (adjusted_mode->crtc_hdisplay - 1) | | ||
4443 | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
4444 | I915_WRITE(HBLANK(pipe), | ||
4445 | (adjusted_mode->crtc_hblank_start - 1) | | ||
4446 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
4447 | I915_WRITE(HSYNC(pipe), | ||
4448 | (adjusted_mode->crtc_hsync_start - 1) | | ||
4449 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
4450 | |||
4451 | I915_WRITE(VTOTAL(pipe), | ||
4452 | (adjusted_mode->crtc_vdisplay - 1) | | ||
4453 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | ||
4454 | I915_WRITE(VBLANK(pipe), | ||
4455 | (adjusted_mode->crtc_vblank_start - 1) | | ||
4456 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
4457 | I915_WRITE(VSYNC(pipe), | ||
4458 | (adjusted_mode->crtc_vsync_start - 1) | | ||
4459 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
4460 | 4846 | ||
4461 | /* pipesrc and dspsize control the size that is scaled from, | 4847 | /* pipesrc and dspsize control the size that is scaled from, |
4462 | * which should always be the user's requested size. | 4848 | * which should always be the user's requested size. |
@@ -4465,8 +4851,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
4465 | ((mode->vdisplay - 1) << 16) | | 4851 | ((mode->vdisplay - 1) << 16) | |
4466 | (mode->hdisplay - 1)); | 4852 | (mode->hdisplay - 1)); |
4467 | I915_WRITE(DSPPOS(plane), 0); | 4853 | I915_WRITE(DSPPOS(plane), 0); |
4468 | I915_WRITE(PIPESRC(pipe), | ||
4469 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
4470 | 4854 | ||
4471 | I915_WRITE(PIPECONF(pipe), pipeconf); | 4855 | I915_WRITE(PIPECONF(pipe), pipeconf); |
4472 | POSTING_READ(PIPECONF(pipe)); | 4856 | POSTING_READ(PIPECONF(pipe)); |
@@ -4657,8 +5041,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc, | |||
4657 | val |= PIPE_12BPC; | 5041 | val |= PIPE_12BPC; |
4658 | break; | 5042 | break; |
4659 | default: | 5043 | default: |
4660 | val |= PIPE_8BPC; | 5044 | /* Case prevented by intel_choose_pipe_bpp_dither. */ |
4661 | break; | 5045 | BUG(); |
4662 | } | 5046 | } |
4663 | 5047 | ||
4664 | val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); | 5048 | val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); |
@@ -4675,6 +5059,31 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc, | |||
4675 | POSTING_READ(PIPECONF(pipe)); | 5059 | POSTING_READ(PIPECONF(pipe)); |
4676 | } | 5060 | } |
4677 | 5061 | ||
5062 | static void haswell_set_pipeconf(struct drm_crtc *crtc, | ||
5063 | struct drm_display_mode *adjusted_mode, | ||
5064 | bool dither) | ||
5065 | { | ||
5066 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | ||
5067 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5068 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
5069 | uint32_t val; | ||
5070 | |||
5071 | val = I915_READ(PIPECONF(cpu_transcoder)); | ||
5072 | |||
5073 | val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); | ||
5074 | if (dither) | ||
5075 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); | ||
5076 | |||
5077 | val &= ~PIPECONF_INTERLACE_MASK_HSW; | ||
5078 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
5079 | val |= PIPECONF_INTERLACED_ILK; | ||
5080 | else | ||
5081 | val |= PIPECONF_PROGRESSIVE; | ||
5082 | |||
5083 | I915_WRITE(PIPECONF(cpu_transcoder), val); | ||
5084 | POSTING_READ(PIPECONF(cpu_transcoder)); | ||
5085 | } | ||
5086 | |||
4678 | static bool ironlake_compute_clocks(struct drm_crtc *crtc, | 5087 | static bool ironlake_compute_clocks(struct drm_crtc *crtc, |
4679 | struct drm_display_mode *adjusted_mode, | 5088 | struct drm_display_mode *adjusted_mode, |
4680 | intel_clock_t *clock, | 5089 | intel_clock_t *clock, |
@@ -4738,74 +5147,115 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, | |||
4738 | return true; | 5147 | return true; |
4739 | } | 5148 | } |
4740 | 5149 | ||
4741 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | 5150 | static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) |
4742 | struct drm_display_mode *mode, | 5151 | { |
4743 | struct drm_display_mode *adjusted_mode, | 5152 | struct drm_i915_private *dev_priv = dev->dev_private; |
4744 | int x, int y, | 5153 | uint32_t temp; |
4745 | struct drm_framebuffer *fb) | 5154 | |
5155 | temp = I915_READ(SOUTH_CHICKEN1); | ||
5156 | if (temp & FDI_BC_BIFURCATION_SELECT) | ||
5157 | return; | ||
5158 | |||
5159 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); | ||
5160 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); | ||
5161 | |||
5162 | temp |= FDI_BC_BIFURCATION_SELECT; | ||
5163 | DRM_DEBUG_KMS("enabling fdi C rx\n"); | ||
5164 | I915_WRITE(SOUTH_CHICKEN1, temp); | ||
5165 | POSTING_READ(SOUTH_CHICKEN1); | ||
5166 | } | ||
5167 | |||
5168 | static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc) | ||
5169 | { | ||
5170 | struct drm_device *dev = intel_crtc->base.dev; | ||
5171 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5172 | struct intel_crtc *pipe_B_crtc = | ||
5173 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); | ||
5174 | |||
5175 | DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n", | ||
5176 | intel_crtc->pipe, intel_crtc->fdi_lanes); | ||
5177 | if (intel_crtc->fdi_lanes > 4) { | ||
5178 | DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n", | ||
5179 | intel_crtc->pipe, intel_crtc->fdi_lanes); | ||
5180 | /* Clamp lanes to avoid programming the hw with bogus values. */ | ||
5181 | intel_crtc->fdi_lanes = 4; | ||
5182 | |||
5183 | return false; | ||
5184 | } | ||
5185 | |||
5186 | if (dev_priv->num_pipe == 2) | ||
5187 | return true; | ||
5188 | |||
5189 | switch (intel_crtc->pipe) { | ||
5190 | case PIPE_A: | ||
5191 | return true; | ||
5192 | case PIPE_B: | ||
5193 | if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled && | ||
5194 | intel_crtc->fdi_lanes > 2) { | ||
5195 | DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", | ||
5196 | intel_crtc->pipe, intel_crtc->fdi_lanes); | ||
5197 | /* Clamp lanes to avoid programming the hw with bogus values. */ | ||
5198 | intel_crtc->fdi_lanes = 2; | ||
5199 | |||
5200 | return false; | ||
5201 | } | ||
5202 | |||
5203 | if (intel_crtc->fdi_lanes > 2) | ||
5204 | WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); | ||
5205 | else | ||
5206 | cpt_enable_fdi_bc_bifurcation(dev); | ||
5207 | |||
5208 | return true; | ||
5209 | case PIPE_C: | ||
5210 | if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) { | ||
5211 | if (intel_crtc->fdi_lanes > 2) { | ||
5212 | DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", | ||
5213 | intel_crtc->pipe, intel_crtc->fdi_lanes); | ||
5214 | /* Clamp lanes to avoid programming the hw with bogus values. */ | ||
5215 | intel_crtc->fdi_lanes = 2; | ||
5216 | |||
5217 | return false; | ||
5218 | } | ||
5219 | } else { | ||
5220 | DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); | ||
5221 | return false; | ||
5222 | } | ||
5223 | |||
5224 | cpt_enable_fdi_bc_bifurcation(dev); | ||
5225 | |||
5226 | return true; | ||
5227 | default: | ||
5228 | BUG(); | ||
5229 | } | ||
5230 | } | ||
5231 | |||
5232 | static void ironlake_set_m_n(struct drm_crtc *crtc, | ||
5233 | struct drm_display_mode *mode, | ||
5234 | struct drm_display_mode *adjusted_mode) | ||
4746 | { | 5235 | { |
4747 | struct drm_device *dev = crtc->dev; | 5236 | struct drm_device *dev = crtc->dev; |
4748 | struct drm_i915_private *dev_priv = dev->dev_private; | 5237 | struct drm_i915_private *dev_priv = dev->dev_private; |
4749 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5238 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4750 | int pipe = intel_crtc->pipe; | 5239 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
4751 | int plane = intel_crtc->plane; | 5240 | struct intel_encoder *intel_encoder, *edp_encoder = NULL; |
4752 | int num_connectors = 0; | ||
4753 | intel_clock_t clock, reduced_clock; | ||
4754 | u32 dpll, fp = 0, fp2 = 0; | ||
4755 | bool ok, has_reduced_clock = false, is_sdvo = false; | ||
4756 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | ||
4757 | struct intel_encoder *encoder, *edp_encoder = NULL; | ||
4758 | int ret; | ||
4759 | struct fdi_m_n m_n = {0}; | 5241 | struct fdi_m_n m_n = {0}; |
4760 | u32 temp; | 5242 | int target_clock, pixel_multiplier, lane, link_bw; |
4761 | int target_clock, pixel_multiplier, lane, link_bw, factor; | 5243 | bool is_dp = false, is_cpu_edp = false; |
4762 | unsigned int pipe_bpp; | ||
4763 | bool dither; | ||
4764 | bool is_cpu_edp = false, is_pch_edp = false; | ||
4765 | 5244 | ||
4766 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 5245 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
4767 | switch (encoder->type) { | 5246 | switch (intel_encoder->type) { |
4768 | case INTEL_OUTPUT_LVDS: | ||
4769 | is_lvds = true; | ||
4770 | break; | ||
4771 | case INTEL_OUTPUT_SDVO: | ||
4772 | case INTEL_OUTPUT_HDMI: | ||
4773 | is_sdvo = true; | ||
4774 | if (encoder->needs_tv_clock) | ||
4775 | is_tv = true; | ||
4776 | break; | ||
4777 | case INTEL_OUTPUT_TVOUT: | ||
4778 | is_tv = true; | ||
4779 | break; | ||
4780 | case INTEL_OUTPUT_ANALOG: | ||
4781 | is_crt = true; | ||
4782 | break; | ||
4783 | case INTEL_OUTPUT_DISPLAYPORT: | 5247 | case INTEL_OUTPUT_DISPLAYPORT: |
4784 | is_dp = true; | 5248 | is_dp = true; |
4785 | break; | 5249 | break; |
4786 | case INTEL_OUTPUT_EDP: | 5250 | case INTEL_OUTPUT_EDP: |
4787 | is_dp = true; | 5251 | is_dp = true; |
4788 | if (intel_encoder_is_pch_edp(&encoder->base)) | 5252 | if (!intel_encoder_is_pch_edp(&intel_encoder->base)) |
4789 | is_pch_edp = true; | ||
4790 | else | ||
4791 | is_cpu_edp = true; | 5253 | is_cpu_edp = true; |
4792 | edp_encoder = encoder; | 5254 | edp_encoder = intel_encoder; |
4793 | break; | 5255 | break; |
4794 | } | 5256 | } |
4795 | |||
4796 | num_connectors++; | ||
4797 | } | ||
4798 | |||
4799 | ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, | ||
4800 | &has_reduced_clock, &reduced_clock); | ||
4801 | if (!ok) { | ||
4802 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | ||
4803 | return -EINVAL; | ||
4804 | } | 5257 | } |
4805 | 5258 | ||
4806 | /* Ensure that the cursor is valid for the new mode before changing... */ | ||
4807 | intel_crtc_update_cursor(crtc, true); | ||
4808 | |||
4809 | /* FDI link */ | 5259 | /* FDI link */ |
4810 | pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | 5260 | pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
4811 | lane = 0; | 5261 | lane = 0; |
@@ -4832,20 +5282,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4832 | else | 5282 | else |
4833 | target_clock = adjusted_mode->clock; | 5283 | target_clock = adjusted_mode->clock; |
4834 | 5284 | ||
4835 | /* determine panel color depth */ | ||
4836 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, | ||
4837 | adjusted_mode); | ||
4838 | if (is_lvds && dev_priv->lvds_dither) | ||
4839 | dither = true; | ||
4840 | |||
4841 | if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 && | ||
4842 | pipe_bpp != 36) { | ||
4843 | WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", | ||
4844 | pipe_bpp); | ||
4845 | pipe_bpp = 24; | ||
4846 | } | ||
4847 | intel_crtc->bpp = pipe_bpp; | ||
4848 | |||
4849 | if (!lane) { | 5285 | if (!lane) { |
4850 | /* | 5286 | /* |
4851 | * Account for spread spectrum to avoid | 5287 | * Account for spread spectrum to avoid |
@@ -4863,10 +5299,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4863 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, | 5299 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
4864 | &m_n); | 5300 | &m_n); |
4865 | 5301 | ||
4866 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 5302 | I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); |
4867 | if (has_reduced_clock) | 5303 | I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); |
4868 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | 5304 | I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); |
4869 | reduced_clock.m2; | 5305 | I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); |
5306 | } | ||
5307 | |||
5308 | static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, | ||
5309 | struct drm_display_mode *adjusted_mode, | ||
5310 | intel_clock_t *clock, u32 fp) | ||
5311 | { | ||
5312 | struct drm_crtc *crtc = &intel_crtc->base; | ||
5313 | struct drm_device *dev = crtc->dev; | ||
5314 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5315 | struct intel_encoder *intel_encoder; | ||
5316 | uint32_t dpll; | ||
5317 | int factor, pixel_multiplier, num_connectors = 0; | ||
5318 | bool is_lvds = false, is_sdvo = false, is_tv = false; | ||
5319 | bool is_dp = false, is_cpu_edp = false; | ||
5320 | |||
5321 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | ||
5322 | switch (intel_encoder->type) { | ||
5323 | case INTEL_OUTPUT_LVDS: | ||
5324 | is_lvds = true; | ||
5325 | break; | ||
5326 | case INTEL_OUTPUT_SDVO: | ||
5327 | case INTEL_OUTPUT_HDMI: | ||
5328 | is_sdvo = true; | ||
5329 | if (intel_encoder->needs_tv_clock) | ||
5330 | is_tv = true; | ||
5331 | break; | ||
5332 | case INTEL_OUTPUT_TVOUT: | ||
5333 | is_tv = true; | ||
5334 | break; | ||
5335 | case INTEL_OUTPUT_DISPLAYPORT: | ||
5336 | is_dp = true; | ||
5337 | break; | ||
5338 | case INTEL_OUTPUT_EDP: | ||
5339 | is_dp = true; | ||
5340 | if (!intel_encoder_is_pch_edp(&intel_encoder->base)) | ||
5341 | is_cpu_edp = true; | ||
5342 | break; | ||
5343 | } | ||
5344 | |||
5345 | num_connectors++; | ||
5346 | } | ||
4870 | 5347 | ||
4871 | /* Enable autotuning of the PLL clock (if permissible) */ | 5348 | /* Enable autotuning of the PLL clock (if permissible) */ |
4872 | factor = 21; | 5349 | factor = 21; |
@@ -4878,7 +5355,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4878 | } else if (is_sdvo && is_tv) | 5355 | } else if (is_sdvo && is_tv) |
4879 | factor = 20; | 5356 | factor = 20; |
4880 | 5357 | ||
4881 | if (clock.m < factor * clock.n) | 5358 | if (clock->m < factor * clock->n) |
4882 | fp |= FP_CB_TUNE; | 5359 | fp |= FP_CB_TUNE; |
4883 | 5360 | ||
4884 | dpll = 0; | 5361 | dpll = 0; |
@@ -4888,7 +5365,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4888 | else | 5365 | else |
4889 | dpll |= DPLLB_MODE_DAC_SERIAL; | 5366 | dpll |= DPLLB_MODE_DAC_SERIAL; |
4890 | if (is_sdvo) { | 5367 | if (is_sdvo) { |
4891 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | 5368 | pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
4892 | if (pixel_multiplier > 1) { | 5369 | if (pixel_multiplier > 1) { |
4893 | dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | 5370 | dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
4894 | } | 5371 | } |
@@ -4898,11 +5375,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4898 | dpll |= DPLL_DVO_HIGH_SPEED; | 5375 | dpll |= DPLL_DVO_HIGH_SPEED; |
4899 | 5376 | ||
4900 | /* compute bitmask from p1 value */ | 5377 | /* compute bitmask from p1 value */ |
4901 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | 5378 | dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
4902 | /* also FPA1 */ | 5379 | /* also FPA1 */ |
4903 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | 5380 | dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
4904 | 5381 | ||
4905 | switch (clock.p2) { | 5382 | switch (clock->p2) { |
4906 | case 5: | 5383 | case 5: |
4907 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | 5384 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
4908 | break; | 5385 | break; |
@@ -4928,15 +5405,79 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
4928 | else | 5405 | else |
4929 | dpll |= PLL_REF_INPUT_DREFCLK; | 5406 | dpll |= PLL_REF_INPUT_DREFCLK; |
4930 | 5407 | ||
5408 | return dpll; | ||
5409 | } | ||
5410 | |||
5411 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | ||
5412 | struct drm_display_mode *mode, | ||
5413 | struct drm_display_mode *adjusted_mode, | ||
5414 | int x, int y, | ||
5415 | struct drm_framebuffer *fb) | ||
5416 | { | ||
5417 | struct drm_device *dev = crtc->dev; | ||
5418 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5419 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5420 | int pipe = intel_crtc->pipe; | ||
5421 | int plane = intel_crtc->plane; | ||
5422 | int num_connectors = 0; | ||
5423 | intel_clock_t clock, reduced_clock; | ||
5424 | u32 dpll, fp = 0, fp2 = 0; | ||
5425 | bool ok, has_reduced_clock = false; | ||
5426 | bool is_lvds = false, is_dp = false, is_cpu_edp = false; | ||
5427 | struct intel_encoder *encoder; | ||
5428 | u32 temp; | ||
5429 | int ret; | ||
5430 | bool dither, fdi_config_ok; | ||
5431 | |||
5432 | for_each_encoder_on_crtc(dev, crtc, encoder) { | ||
5433 | switch (encoder->type) { | ||
5434 | case INTEL_OUTPUT_LVDS: | ||
5435 | is_lvds = true; | ||
5436 | break; | ||
5437 | case INTEL_OUTPUT_DISPLAYPORT: | ||
5438 | is_dp = true; | ||
5439 | break; | ||
5440 | case INTEL_OUTPUT_EDP: | ||
5441 | is_dp = true; | ||
5442 | if (!intel_encoder_is_pch_edp(&encoder->base)) | ||
5443 | is_cpu_edp = true; | ||
5444 | break; | ||
5445 | } | ||
5446 | |||
5447 | num_connectors++; | ||
5448 | } | ||
5449 | |||
5450 | WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), | ||
5451 | "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); | ||
5452 | |||
5453 | ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, | ||
5454 | &has_reduced_clock, &reduced_clock); | ||
5455 | if (!ok) { | ||
5456 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | ||
5457 | return -EINVAL; | ||
5458 | } | ||
5459 | |||
5460 | /* Ensure that the cursor is valid for the new mode before changing... */ | ||
5461 | intel_crtc_update_cursor(crtc, true); | ||
5462 | |||
5463 | /* determine panel color depth */ | ||
5464 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, | ||
5465 | adjusted_mode); | ||
5466 | if (is_lvds && dev_priv->lvds_dither) | ||
5467 | dither = true; | ||
5468 | |||
5469 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | ||
5470 | if (has_reduced_clock) | ||
5471 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | ||
5472 | reduced_clock.m2; | ||
5473 | |||
5474 | dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp); | ||
5475 | |||
4931 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); | 5476 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
4932 | drm_mode_debug_printmodeline(mode); | 5477 | drm_mode_debug_printmodeline(mode); |
4933 | 5478 | ||
4934 | /* CPU eDP is the only output that doesn't need a PCH PLL of its own on | 5479 | /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ |
4935 | * pre-Haswell/LPT generation */ | 5480 | if (!is_cpu_edp) { |
4936 | if (HAS_PCH_LPT(dev)) { | ||
4937 | DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n", | ||
4938 | pipe); | ||
4939 | } else if (!is_cpu_edp) { | ||
4940 | struct intel_pch_pll *pll; | 5481 | struct intel_pch_pll *pll; |
4941 | 5482 | ||
4942 | pll = intel_get_pch_pll(intel_crtc, dpll, fp); | 5483 | pll = intel_get_pch_pll(intel_crtc, dpll, fp); |
@@ -5022,47 +5563,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5022 | } | 5563 | } |
5023 | } | 5564 | } |
5024 | 5565 | ||
5025 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | 5566 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
5026 | /* the chip adds 2 halflines automatically */ | ||
5027 | adjusted_mode->crtc_vtotal -= 1; | ||
5028 | adjusted_mode->crtc_vblank_end -= 1; | ||
5029 | I915_WRITE(VSYNCSHIFT(pipe), | ||
5030 | adjusted_mode->crtc_hsync_start | ||
5031 | - adjusted_mode->crtc_htotal/2); | ||
5032 | } else { | ||
5033 | I915_WRITE(VSYNCSHIFT(pipe), 0); | ||
5034 | } | ||
5035 | |||
5036 | I915_WRITE(HTOTAL(pipe), | ||
5037 | (adjusted_mode->crtc_hdisplay - 1) | | ||
5038 | ((adjusted_mode->crtc_htotal - 1) << 16)); | ||
5039 | I915_WRITE(HBLANK(pipe), | ||
5040 | (adjusted_mode->crtc_hblank_start - 1) | | ||
5041 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); | ||
5042 | I915_WRITE(HSYNC(pipe), | ||
5043 | (adjusted_mode->crtc_hsync_start - 1) | | ||
5044 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); | ||
5045 | 5567 | ||
5046 | I915_WRITE(VTOTAL(pipe), | 5568 | /* Note, this also computes intel_crtc->fdi_lanes which is used below in |
5047 | (adjusted_mode->crtc_vdisplay - 1) | | 5569 | * ironlake_check_fdi_lanes. */ |
5048 | ((adjusted_mode->crtc_vtotal - 1) << 16)); | 5570 | ironlake_set_m_n(crtc, mode, adjusted_mode); |
5049 | I915_WRITE(VBLANK(pipe), | ||
5050 | (adjusted_mode->crtc_vblank_start - 1) | | ||
5051 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); | ||
5052 | I915_WRITE(VSYNC(pipe), | ||
5053 | (adjusted_mode->crtc_vsync_start - 1) | | ||
5054 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); | ||
5055 | 5571 | ||
5056 | /* pipesrc controls the size that is scaled from, which should | 5572 | fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); |
5057 | * always be the user's requested size. | ||
5058 | */ | ||
5059 | I915_WRITE(PIPESRC(pipe), | ||
5060 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | ||
5061 | |||
5062 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); | ||
5063 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); | ||
5064 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); | ||
5065 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); | ||
5066 | 5573 | ||
5067 | if (is_cpu_edp) | 5574 | if (is_cpu_edp) |
5068 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | 5575 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
@@ -5081,6 +5588,217 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5081 | 5588 | ||
5082 | intel_update_linetime_watermarks(dev, pipe, adjusted_mode); | 5589 | intel_update_linetime_watermarks(dev, pipe, adjusted_mode); |
5083 | 5590 | ||
5591 | return fdi_config_ok ? ret : -EINVAL; | ||
5592 | } | ||
5593 | |||
5594 | static int haswell_crtc_mode_set(struct drm_crtc *crtc, | ||
5595 | struct drm_display_mode *mode, | ||
5596 | struct drm_display_mode *adjusted_mode, | ||
5597 | int x, int y, | ||
5598 | struct drm_framebuffer *fb) | ||
5599 | { | ||
5600 | struct drm_device *dev = crtc->dev; | ||
5601 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5602 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5603 | int pipe = intel_crtc->pipe; | ||
5604 | int plane = intel_crtc->plane; | ||
5605 | int num_connectors = 0; | ||
5606 | intel_clock_t clock, reduced_clock; | ||
5607 | u32 dpll = 0, fp = 0, fp2 = 0; | ||
5608 | bool ok, has_reduced_clock = false; | ||
5609 | bool is_lvds = false, is_dp = false, is_cpu_edp = false; | ||
5610 | struct intel_encoder *encoder; | ||
5611 | u32 temp; | ||
5612 | int ret; | ||
5613 | bool dither; | ||
5614 | |||
5615 | for_each_encoder_on_crtc(dev, crtc, encoder) { | ||
5616 | switch (encoder->type) { | ||
5617 | case INTEL_OUTPUT_LVDS: | ||
5618 | is_lvds = true; | ||
5619 | break; | ||
5620 | case INTEL_OUTPUT_DISPLAYPORT: | ||
5621 | is_dp = true; | ||
5622 | break; | ||
5623 | case INTEL_OUTPUT_EDP: | ||
5624 | is_dp = true; | ||
5625 | if (!intel_encoder_is_pch_edp(&encoder->base)) | ||
5626 | is_cpu_edp = true; | ||
5627 | break; | ||
5628 | } | ||
5629 | |||
5630 | num_connectors++; | ||
5631 | } | ||
5632 | |||
5633 | if (is_cpu_edp) | ||
5634 | intel_crtc->cpu_transcoder = TRANSCODER_EDP; | ||
5635 | else | ||
5636 | intel_crtc->cpu_transcoder = pipe; | ||
5637 | |||
5638 | /* We are not sure yet this won't happen. */ | ||
5639 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", | ||
5640 | INTEL_PCH_TYPE(dev)); | ||
5641 | |||
5642 | WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", | ||
5643 | num_connectors, pipe_name(pipe)); | ||
5644 | |||
5645 | WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & | ||
5646 | (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); | ||
5647 | |||
5648 | WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); | ||
5649 | |||
5650 | if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) | ||
5651 | return -EINVAL; | ||
5652 | |||
5653 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { | ||
5654 | ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, | ||
5655 | &has_reduced_clock, | ||
5656 | &reduced_clock); | ||
5657 | if (!ok) { | ||
5658 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | ||
5659 | return -EINVAL; | ||
5660 | } | ||
5661 | } | ||
5662 | |||
5663 | /* Ensure that the cursor is valid for the new mode before changing... */ | ||
5664 | intel_crtc_update_cursor(crtc, true); | ||
5665 | |||
5666 | /* determine panel color depth */ | ||
5667 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, | ||
5668 | adjusted_mode); | ||
5669 | if (is_lvds && dev_priv->lvds_dither) | ||
5670 | dither = true; | ||
5671 | |||
5672 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); | ||
5673 | drm_mode_debug_printmodeline(mode); | ||
5674 | |||
5675 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { | ||
5676 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | ||
5677 | if (has_reduced_clock) | ||
5678 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | ||
5679 | reduced_clock.m2; | ||
5680 | |||
5681 | dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, | ||
5682 | fp); | ||
5683 | |||
5684 | /* CPU eDP is the only output that doesn't need a PCH PLL of its | ||
5685 | * own on pre-Haswell/LPT generation */ | ||
5686 | if (!is_cpu_edp) { | ||
5687 | struct intel_pch_pll *pll; | ||
5688 | |||
5689 | pll = intel_get_pch_pll(intel_crtc, dpll, fp); | ||
5690 | if (pll == NULL) { | ||
5691 | DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", | ||
5692 | pipe); | ||
5693 | return -EINVAL; | ||
5694 | } | ||
5695 | } else | ||
5696 | intel_put_pch_pll(intel_crtc); | ||
5697 | |||
5698 | /* The LVDS pin pair needs to be on before the DPLLs are | ||
5699 | * enabled. This is an exception to the general rule that | ||
5700 | * mode_set doesn't turn things on. | ||
5701 | */ | ||
5702 | if (is_lvds) { | ||
5703 | temp = I915_READ(PCH_LVDS); | ||
5704 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | ||
5705 | if (HAS_PCH_CPT(dev)) { | ||
5706 | temp &= ~PORT_TRANS_SEL_MASK; | ||
5707 | temp |= PORT_TRANS_SEL_CPT(pipe); | ||
5708 | } else { | ||
5709 | if (pipe == 1) | ||
5710 | temp |= LVDS_PIPEB_SELECT; | ||
5711 | else | ||
5712 | temp &= ~LVDS_PIPEB_SELECT; | ||
5713 | } | ||
5714 | |||
5715 | /* set the corresponsding LVDS_BORDER bit */ | ||
5716 | temp |= dev_priv->lvds_border_bits; | ||
5717 | /* Set the B0-B3 data pairs corresponding to whether | ||
5718 | * we're going to set the DPLLs for dual-channel mode or | ||
5719 | * not. | ||
5720 | */ | ||
5721 | if (clock.p2 == 7) | ||
5722 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; | ||
5723 | else | ||
5724 | temp &= ~(LVDS_B0B3_POWER_UP | | ||
5725 | LVDS_CLKB_POWER_UP); | ||
5726 | |||
5727 | /* It would be nice to set 24 vs 18-bit mode | ||
5728 | * (LVDS_A3_POWER_UP) appropriately here, but we need to | ||
5729 | * look more thoroughly into how panels behave in the | ||
5730 | * two modes. | ||
5731 | */ | ||
5732 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); | ||
5733 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
5734 | temp |= LVDS_HSYNC_POLARITY; | ||
5735 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
5736 | temp |= LVDS_VSYNC_POLARITY; | ||
5737 | I915_WRITE(PCH_LVDS, temp); | ||
5738 | } | ||
5739 | } | ||
5740 | |||
5741 | if (is_dp && !is_cpu_edp) { | ||
5742 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | ||
5743 | } else { | ||
5744 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { | ||
5745 | /* For non-DP output, clear any trans DP clock recovery | ||
5746 | * setting.*/ | ||
5747 | I915_WRITE(TRANSDATA_M1(pipe), 0); | ||
5748 | I915_WRITE(TRANSDATA_N1(pipe), 0); | ||
5749 | I915_WRITE(TRANSDPLINK_M1(pipe), 0); | ||
5750 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); | ||
5751 | } | ||
5752 | } | ||
5753 | |||
5754 | intel_crtc->lowfreq_avail = false; | ||
5755 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { | ||
5756 | if (intel_crtc->pch_pll) { | ||
5757 | I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); | ||
5758 | |||
5759 | /* Wait for the clocks to stabilize. */ | ||
5760 | POSTING_READ(intel_crtc->pch_pll->pll_reg); | ||
5761 | udelay(150); | ||
5762 | |||
5763 | /* The pixel multiplier can only be updated once the | ||
5764 | * DPLL is enabled and the clocks are stable. | ||
5765 | * | ||
5766 | * So write it again. | ||
5767 | */ | ||
5768 | I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); | ||
5769 | } | ||
5770 | |||
5771 | if (intel_crtc->pch_pll) { | ||
5772 | if (is_lvds && has_reduced_clock && i915_powersave) { | ||
5773 | I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); | ||
5774 | intel_crtc->lowfreq_avail = true; | ||
5775 | } else { | ||
5776 | I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); | ||
5777 | } | ||
5778 | } | ||
5779 | } | ||
5780 | |||
5781 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); | ||
5782 | |||
5783 | if (!is_dp || is_cpu_edp) | ||
5784 | ironlake_set_m_n(crtc, mode, adjusted_mode); | ||
5785 | |||
5786 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) | ||
5787 | if (is_cpu_edp) | ||
5788 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | ||
5789 | |||
5790 | haswell_set_pipeconf(crtc, adjusted_mode, dither); | ||
5791 | |||
5792 | /* Set up the display plane register */ | ||
5793 | I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); | ||
5794 | POSTING_READ(DSPCNTR(plane)); | ||
5795 | |||
5796 | ret = intel_pipe_set_base(crtc, x, y, fb); | ||
5797 | |||
5798 | intel_update_watermarks(dev); | ||
5799 | |||
5800 | intel_update_linetime_watermarks(dev, pipe, adjusted_mode); | ||
5801 | |||
5084 | return ret; | 5802 | return ret; |
5085 | } | 5803 | } |
5086 | 5804 | ||
@@ -5092,6 +5810,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5092 | { | 5810 | { |
5093 | struct drm_device *dev = crtc->dev; | 5811 | struct drm_device *dev = crtc->dev; |
5094 | struct drm_i915_private *dev_priv = dev->dev_private; | 5812 | struct drm_i915_private *dev_priv = dev->dev_private; |
5813 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
5814 | struct intel_encoder *encoder; | ||
5095 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5815 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5096 | int pipe = intel_crtc->pipe; | 5816 | int pipe = intel_crtc->pipe; |
5097 | int ret; | 5817 | int ret; |
@@ -5102,7 +5822,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5102 | x, y, fb); | 5822 | x, y, fb); |
5103 | drm_vblank_post_modeset(dev, pipe); | 5823 | drm_vblank_post_modeset(dev, pipe); |
5104 | 5824 | ||
5105 | return ret; | 5825 | if (ret != 0) |
5826 | return ret; | ||
5827 | |||
5828 | for_each_encoder_on_crtc(dev, crtc, encoder) { | ||
5829 | DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", | ||
5830 | encoder->base.base.id, | ||
5831 | drm_get_encoder_name(&encoder->base), | ||
5832 | mode->base.id, mode->name); | ||
5833 | encoder_funcs = encoder->base.helper_private; | ||
5834 | encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); | ||
5835 | } | ||
5836 | |||
5837 | return 0; | ||
5106 | } | 5838 | } |
5107 | 5839 | ||
5108 | static bool intel_eld_uptodate(struct drm_connector *connector, | 5840 | static bool intel_eld_uptodate(struct drm_connector *connector, |
@@ -5738,7 +6470,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, | |||
5738 | int depth, int bpp) | 6470 | int depth, int bpp) |
5739 | { | 6471 | { |
5740 | struct drm_i915_gem_object *obj; | 6472 | struct drm_i915_gem_object *obj; |
5741 | struct drm_mode_fb_cmd2 mode_cmd; | 6473 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; |
5742 | 6474 | ||
5743 | obj = i915_gem_alloc_object(dev, | 6475 | obj = i915_gem_alloc_object(dev, |
5744 | intel_framebuffer_size_for_mode(mode, bpp)); | 6476 | intel_framebuffer_size_for_mode(mode, bpp)); |
@@ -5868,24 +6600,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, | |||
5868 | DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); | 6600 | DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); |
5869 | if (IS_ERR(fb)) { | 6601 | if (IS_ERR(fb)) { |
5870 | DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); | 6602 | DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); |
5871 | goto fail; | 6603 | return false; |
5872 | } | 6604 | } |
5873 | 6605 | ||
5874 | if (!intel_set_mode(crtc, mode, 0, 0, fb)) { | 6606 | if (!intel_set_mode(crtc, mode, 0, 0, fb)) { |
5875 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); | 6607 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); |
5876 | if (old->release_fb) | 6608 | if (old->release_fb) |
5877 | old->release_fb->funcs->destroy(old->release_fb); | 6609 | old->release_fb->funcs->destroy(old->release_fb); |
5878 | goto fail; | 6610 | return false; |
5879 | } | 6611 | } |
5880 | 6612 | ||
5881 | /* let the connector get through one full cycle before testing */ | 6613 | /* let the connector get through one full cycle before testing */ |
5882 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 6614 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
5883 | |||
5884 | return true; | 6615 | return true; |
5885 | fail: | ||
5886 | connector->encoder = NULL; | ||
5887 | encoder->crtc = NULL; | ||
5888 | return false; | ||
5889 | } | 6616 | } |
5890 | 6617 | ||
5891 | void intel_release_load_detect_pipe(struct drm_connector *connector, | 6618 | void intel_release_load_detect_pipe(struct drm_connector *connector, |
@@ -6010,12 +6737,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
6010 | { | 6737 | { |
6011 | struct drm_i915_private *dev_priv = dev->dev_private; | 6738 | struct drm_i915_private *dev_priv = dev->dev_private; |
6012 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6739 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6013 | int pipe = intel_crtc->pipe; | 6740 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
6014 | struct drm_display_mode *mode; | 6741 | struct drm_display_mode *mode; |
6015 | int htot = I915_READ(HTOTAL(pipe)); | 6742 | int htot = I915_READ(HTOTAL(cpu_transcoder)); |
6016 | int hsync = I915_READ(HSYNC(pipe)); | 6743 | int hsync = I915_READ(HSYNC(cpu_transcoder)); |
6017 | int vtot = I915_READ(VTOTAL(pipe)); | 6744 | int vtot = I915_READ(VTOTAL(cpu_transcoder)); |
6018 | int vsync = I915_READ(VSYNC(pipe)); | 6745 | int vsync = I915_READ(VSYNC(cpu_transcoder)); |
6019 | 6746 | ||
6020 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 6747 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
6021 | if (!mode) | 6748 | if (!mode) |
@@ -7004,8 +7731,6 @@ bool intel_set_mode(struct drm_crtc *crtc, | |||
7004 | struct drm_device *dev = crtc->dev; | 7731 | struct drm_device *dev = crtc->dev; |
7005 | drm_i915_private_t *dev_priv = dev->dev_private; | 7732 | drm_i915_private_t *dev_priv = dev->dev_private; |
7006 | struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; | 7733 | struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; |
7007 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
7008 | struct drm_encoder *encoder; | ||
7009 | struct intel_crtc *intel_crtc; | 7734 | struct intel_crtc *intel_crtc; |
7010 | unsigned disable_pipes, prepare_pipes, modeset_pipes; | 7735 | unsigned disable_pipes, prepare_pipes, modeset_pipes; |
7011 | bool ret = true; | 7736 | bool ret = true; |
@@ -7050,6 +7775,9 @@ bool intel_set_mode(struct drm_crtc *crtc, | |||
7050 | * update the the output configuration. */ | 7775 | * update the the output configuration. */ |
7051 | intel_modeset_update_state(dev, prepare_pipes); | 7776 | intel_modeset_update_state(dev, prepare_pipes); |
7052 | 7777 | ||
7778 | if (dev_priv->display.modeset_global_resources) | ||
7779 | dev_priv->display.modeset_global_resources(dev); | ||
7780 | |||
7053 | /* Set up the DPLL and any encoders state that needs to adjust or depend | 7781 | /* Set up the DPLL and any encoders state that needs to adjust or depend |
7054 | * on the DPLL. | 7782 | * on the DPLL. |
7055 | */ | 7783 | */ |
@@ -7059,18 +7787,6 @@ bool intel_set_mode(struct drm_crtc *crtc, | |||
7059 | x, y, fb); | 7787 | x, y, fb); |
7060 | if (!ret) | 7788 | if (!ret) |
7061 | goto done; | 7789 | goto done; |
7062 | |||
7063 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
7064 | |||
7065 | if (encoder->crtc != &intel_crtc->base) | ||
7066 | continue; | ||
7067 | |||
7068 | DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", | ||
7069 | encoder->base.id, drm_get_encoder_name(encoder), | ||
7070 | mode->base.id, mode->name); | ||
7071 | encoder_funcs = encoder->helper_private; | ||
7072 | encoder_funcs->mode_set(encoder, mode, adjusted_mode); | ||
7073 | } | ||
7074 | } | 7790 | } |
7075 | 7791 | ||
7076 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ | 7792 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ |
@@ -7409,6 +8125,12 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { | |||
7409 | .page_flip = intel_crtc_page_flip, | 8125 | .page_flip = intel_crtc_page_flip, |
7410 | }; | 8126 | }; |
7411 | 8127 | ||
8128 | static void intel_cpu_pll_init(struct drm_device *dev) | ||
8129 | { | ||
8130 | if (IS_HASWELL(dev)) | ||
8131 | intel_ddi_pll_init(dev); | ||
8132 | } | ||
8133 | |||
7412 | static void intel_pch_pll_init(struct drm_device *dev) | 8134 | static void intel_pch_pll_init(struct drm_device *dev) |
7413 | { | 8135 | { |
7414 | drm_i915_private_t *dev_priv = dev->dev_private; | 8136 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -7448,6 +8170,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
7448 | /* Swap pipes & planes for FBC on pre-965 */ | 8170 | /* Swap pipes & planes for FBC on pre-965 */ |
7449 | intel_crtc->pipe = pipe; | 8171 | intel_crtc->pipe = pipe; |
7450 | intel_crtc->plane = pipe; | 8172 | intel_crtc->plane = pipe; |
8173 | intel_crtc->cpu_transcoder = pipe; | ||
7451 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { | 8174 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { |
7452 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); | 8175 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
7453 | intel_crtc->plane = !pipe; | 8176 | intel_crtc->plane = !pipe; |
@@ -7540,16 +8263,6 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7540 | I915_WRITE(PFIT_CONTROL, 0); | 8263 | I915_WRITE(PFIT_CONTROL, 0); |
7541 | } | 8264 | } |
7542 | 8265 | ||
7543 | if (HAS_PCH_SPLIT(dev)) { | ||
7544 | dpd_is_edp = intel_dpd_is_edp(dev); | ||
7545 | |||
7546 | if (has_edp_a(dev)) | ||
7547 | intel_dp_init(dev, DP_A, PORT_A); | ||
7548 | |||
7549 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | ||
7550 | intel_dp_init(dev, PCH_DP_D, PORT_D); | ||
7551 | } | ||
7552 | |||
7553 | intel_crt_init(dev); | 8266 | intel_crt_init(dev); |
7554 | 8267 | ||
7555 | if (IS_HASWELL(dev)) { | 8268 | if (IS_HASWELL(dev)) { |
@@ -7573,6 +8286,10 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7573 | intel_ddi_init(dev, PORT_D); | 8286 | intel_ddi_init(dev, PORT_D); |
7574 | } else if (HAS_PCH_SPLIT(dev)) { | 8287 | } else if (HAS_PCH_SPLIT(dev)) { |
7575 | int found; | 8288 | int found; |
8289 | dpd_is_edp = intel_dpd_is_edp(dev); | ||
8290 | |||
8291 | if (has_edp_a(dev)) | ||
8292 | intel_dp_init(dev, DP_A, PORT_A); | ||
7576 | 8293 | ||
7577 | if (I915_READ(HDMIB) & PORT_DETECTED) { | 8294 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
7578 | /* PCH SDVOB multiplex with HDMIB */ | 8295 | /* PCH SDVOB multiplex with HDMIB */ |
@@ -7592,11 +8309,15 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7592 | if (I915_READ(PCH_DP_C) & DP_DETECTED) | 8309 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
7593 | intel_dp_init(dev, PCH_DP_C, PORT_C); | 8310 | intel_dp_init(dev, PCH_DP_C, PORT_C); |
7594 | 8311 | ||
7595 | if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) | 8312 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
7596 | intel_dp_init(dev, PCH_DP_D, PORT_D); | 8313 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
7597 | } else if (IS_VALLEYVIEW(dev)) { | 8314 | } else if (IS_VALLEYVIEW(dev)) { |
7598 | int found; | 8315 | int found; |
7599 | 8316 | ||
8317 | /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ | ||
8318 | if (I915_READ(DP_C) & DP_DETECTED) | ||
8319 | intel_dp_init(dev, DP_C, PORT_C); | ||
8320 | |||
7600 | if (I915_READ(SDVOB) & PORT_DETECTED) { | 8321 | if (I915_READ(SDVOB) & PORT_DETECTED) { |
7601 | /* SDVOB multiplex with HDMIB */ | 8322 | /* SDVOB multiplex with HDMIB */ |
7602 | found = intel_sdvo_init(dev, SDVOB, true); | 8323 | found = intel_sdvo_init(dev, SDVOB, true); |
@@ -7609,9 +8330,6 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7609 | if (I915_READ(SDVOC) & PORT_DETECTED) | 8330 | if (I915_READ(SDVOC) & PORT_DETECTED) |
7610 | intel_hdmi_init(dev, SDVOC, PORT_C); | 8331 | intel_hdmi_init(dev, SDVOC, PORT_C); |
7611 | 8332 | ||
7612 | /* Shares lanes with HDMI on SDVOC */ | ||
7613 | if (I915_READ(DP_C) & DP_DETECTED) | ||
7614 | intel_dp_init(dev, DP_C, PORT_C); | ||
7615 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | 8333 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
7616 | bool found = false; | 8334 | bool found = false; |
7617 | 8335 | ||
@@ -7667,6 +8385,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7667 | 8385 | ||
7668 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) | 8386 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) |
7669 | ironlake_init_pch_refclk(dev); | 8387 | ironlake_init_pch_refclk(dev); |
8388 | |||
8389 | drm_helper_move_panel_connectors_to_head(dev); | ||
7670 | } | 8390 | } |
7671 | 8391 | ||
7672 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 8392 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -7707,27 +8427,51 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
7707 | if (mode_cmd->pitches[0] & 63) | 8427 | if (mode_cmd->pitches[0] & 63) |
7708 | return -EINVAL; | 8428 | return -EINVAL; |
7709 | 8429 | ||
8430 | /* FIXME <= Gen4 stride limits are bit unclear */ | ||
8431 | if (mode_cmd->pitches[0] > 32768) | ||
8432 | return -EINVAL; | ||
8433 | |||
8434 | if (obj->tiling_mode != I915_TILING_NONE && | ||
8435 | mode_cmd->pitches[0] != obj->stride) | ||
8436 | return -EINVAL; | ||
8437 | |||
8438 | /* Reject formats not supported by any plane early. */ | ||
7710 | switch (mode_cmd->pixel_format) { | 8439 | switch (mode_cmd->pixel_format) { |
7711 | case DRM_FORMAT_RGB332: | 8440 | case DRM_FORMAT_C8: |
7712 | case DRM_FORMAT_RGB565: | 8441 | case DRM_FORMAT_RGB565: |
7713 | case DRM_FORMAT_XRGB8888: | 8442 | case DRM_FORMAT_XRGB8888: |
7714 | case DRM_FORMAT_XBGR8888: | ||
7715 | case DRM_FORMAT_ARGB8888: | 8443 | case DRM_FORMAT_ARGB8888: |
8444 | break; | ||
8445 | case DRM_FORMAT_XRGB1555: | ||
8446 | case DRM_FORMAT_ARGB1555: | ||
8447 | if (INTEL_INFO(dev)->gen > 3) | ||
8448 | return -EINVAL; | ||
8449 | break; | ||
8450 | case DRM_FORMAT_XBGR8888: | ||
8451 | case DRM_FORMAT_ABGR8888: | ||
7716 | case DRM_FORMAT_XRGB2101010: | 8452 | case DRM_FORMAT_XRGB2101010: |
7717 | case DRM_FORMAT_ARGB2101010: | 8453 | case DRM_FORMAT_ARGB2101010: |
7718 | /* RGB formats are common across chipsets */ | 8454 | case DRM_FORMAT_XBGR2101010: |
8455 | case DRM_FORMAT_ABGR2101010: | ||
8456 | if (INTEL_INFO(dev)->gen < 4) | ||
8457 | return -EINVAL; | ||
7719 | break; | 8458 | break; |
7720 | case DRM_FORMAT_YUYV: | 8459 | case DRM_FORMAT_YUYV: |
7721 | case DRM_FORMAT_UYVY: | 8460 | case DRM_FORMAT_UYVY: |
7722 | case DRM_FORMAT_YVYU: | 8461 | case DRM_FORMAT_YVYU: |
7723 | case DRM_FORMAT_VYUY: | 8462 | case DRM_FORMAT_VYUY: |
8463 | if (INTEL_INFO(dev)->gen < 6) | ||
8464 | return -EINVAL; | ||
7724 | break; | 8465 | break; |
7725 | default: | 8466 | default: |
7726 | DRM_DEBUG_KMS("unsupported pixel format %u\n", | 8467 | DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); |
7727 | mode_cmd->pixel_format); | ||
7728 | return -EINVAL; | 8468 | return -EINVAL; |
7729 | } | 8469 | } |
7730 | 8470 | ||
8471 | /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ | ||
8472 | if (mode_cmd->offsets[0] != 0) | ||
8473 | return -EINVAL; | ||
8474 | |||
7731 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); | 8475 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
7732 | if (ret) { | 8476 | if (ret) { |
7733 | DRM_ERROR("framebuffer init failed %d\n", ret); | 8477 | DRM_ERROR("framebuffer init failed %d\n", ret); |
@@ -7765,7 +8509,13 @@ static void intel_init_display(struct drm_device *dev) | |||
7765 | struct drm_i915_private *dev_priv = dev->dev_private; | 8509 | struct drm_i915_private *dev_priv = dev->dev_private; |
7766 | 8510 | ||
7767 | /* We always want a DPMS function */ | 8511 | /* We always want a DPMS function */ |
7768 | if (HAS_PCH_SPLIT(dev)) { | 8512 | if (IS_HASWELL(dev)) { |
8513 | dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; | ||
8514 | dev_priv->display.crtc_enable = haswell_crtc_enable; | ||
8515 | dev_priv->display.crtc_disable = haswell_crtc_disable; | ||
8516 | dev_priv->display.off = haswell_crtc_off; | ||
8517 | dev_priv->display.update_plane = ironlake_update_plane; | ||
8518 | } else if (HAS_PCH_SPLIT(dev)) { | ||
7769 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; | 8519 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
7770 | dev_priv->display.crtc_enable = ironlake_crtc_enable; | 8520 | dev_priv->display.crtc_enable = ironlake_crtc_enable; |
7771 | dev_priv->display.crtc_disable = ironlake_crtc_disable; | 8521 | dev_priv->display.crtc_disable = ironlake_crtc_disable; |
@@ -7816,6 +8566,8 @@ static void intel_init_display(struct drm_device *dev) | |||
7816 | /* FIXME: detect B0+ stepping and use auto training */ | 8566 | /* FIXME: detect B0+ stepping and use auto training */ |
7817 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; | 8567 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
7818 | dev_priv->display.write_eld = ironlake_write_eld; | 8568 | dev_priv->display.write_eld = ironlake_write_eld; |
8569 | dev_priv->display.modeset_global_resources = | ||
8570 | ivb_modeset_global_resources; | ||
7819 | } else if (IS_HASWELL(dev)) { | 8571 | } else if (IS_HASWELL(dev)) { |
7820 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; | 8572 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
7821 | dev_priv->display.write_eld = haswell_write_eld; | 8573 | dev_priv->display.write_eld = haswell_write_eld; |
@@ -8047,6 +8799,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
8047 | DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); | 8799 | DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); |
8048 | } | 8800 | } |
8049 | 8801 | ||
8802 | intel_cpu_pll_init(dev); | ||
8050 | intel_pch_pll_init(dev); | 8803 | intel_pch_pll_init(dev); |
8051 | 8804 | ||
8052 | /* Just disable it once at startup */ | 8805 | /* Just disable it once at startup */ |
@@ -8116,7 +8869,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
8116 | u32 reg; | 8869 | u32 reg; |
8117 | 8870 | ||
8118 | /* Clear any frame start delays used for debugging left by the BIOS */ | 8871 | /* Clear any frame start delays used for debugging left by the BIOS */ |
8119 | reg = PIPECONF(crtc->pipe); | 8872 | reg = PIPECONF(crtc->cpu_transcoder); |
8120 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); | 8873 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
8121 | 8874 | ||
8122 | /* We need to sanitize the plane -> pipe mapping first because this will | 8875 | /* We need to sanitize the plane -> pipe mapping first because this will |
@@ -8244,10 +8997,35 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) | |||
8244 | struct intel_encoder *encoder; | 8997 | struct intel_encoder *encoder; |
8245 | struct intel_connector *connector; | 8998 | struct intel_connector *connector; |
8246 | 8999 | ||
9000 | if (IS_HASWELL(dev)) { | ||
9001 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); | ||
9002 | |||
9003 | if (tmp & TRANS_DDI_FUNC_ENABLE) { | ||
9004 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { | ||
9005 | case TRANS_DDI_EDP_INPUT_A_ON: | ||
9006 | case TRANS_DDI_EDP_INPUT_A_ONOFF: | ||
9007 | pipe = PIPE_A; | ||
9008 | break; | ||
9009 | case TRANS_DDI_EDP_INPUT_B_ONOFF: | ||
9010 | pipe = PIPE_B; | ||
9011 | break; | ||
9012 | case TRANS_DDI_EDP_INPUT_C_ONOFF: | ||
9013 | pipe = PIPE_C; | ||
9014 | break; | ||
9015 | } | ||
9016 | |||
9017 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | ||
9018 | crtc->cpu_transcoder = TRANSCODER_EDP; | ||
9019 | |||
9020 | DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", | ||
9021 | pipe_name(pipe)); | ||
9022 | } | ||
9023 | } | ||
9024 | |||
8247 | for_each_pipe(pipe) { | 9025 | for_each_pipe(pipe) { |
8248 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 9026 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
8249 | 9027 | ||
8250 | tmp = I915_READ(PIPECONF(pipe)); | 9028 | tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); |
8251 | if (tmp & PIPECONF_ENABLE) | 9029 | if (tmp & PIPECONF_ENABLE) |
8252 | crtc->active = true; | 9030 | crtc->active = true; |
8253 | else | 9031 | else |
@@ -8260,6 +9038,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) | |||
8260 | crtc->active ? "enabled" : "disabled"); | 9038 | crtc->active ? "enabled" : "disabled"); |
8261 | } | 9039 | } |
8262 | 9040 | ||
9041 | if (IS_HASWELL(dev)) | ||
9042 | intel_ddi_setup_hw_pll_state(dev); | ||
9043 | |||
8263 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 9044 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
8264 | base.head) { | 9045 | base.head) { |
8265 | pipe = 0; | 9046 | pipe = 0; |
@@ -8309,6 +9090,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) | |||
8309 | intel_modeset_update_staged_output_state(dev); | 9090 | intel_modeset_update_staged_output_state(dev); |
8310 | 9091 | ||
8311 | intel_modeset_check_state(dev); | 9092 | intel_modeset_check_state(dev); |
9093 | |||
9094 | drm_mode_config_reset(dev); | ||
8312 | } | 9095 | } |
8313 | 9096 | ||
8314 | void intel_modeset_gem_init(struct drm_device *dev) | 9097 | void intel_modeset_gem_init(struct drm_device *dev) |
@@ -8436,6 +9219,7 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
8436 | { | 9219 | { |
8437 | drm_i915_private_t *dev_priv = dev->dev_private; | 9220 | drm_i915_private_t *dev_priv = dev->dev_private; |
8438 | struct intel_display_error_state *error; | 9221 | struct intel_display_error_state *error; |
9222 | enum transcoder cpu_transcoder; | ||
8439 | int i; | 9223 | int i; |
8440 | 9224 | ||
8441 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 9225 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
@@ -8443,6 +9227,8 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
8443 | return NULL; | 9227 | return NULL; |
8444 | 9228 | ||
8445 | for_each_pipe(i) { | 9229 | for_each_pipe(i) { |
9230 | cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); | ||
9231 | |||
8446 | error->cursor[i].control = I915_READ(CURCNTR(i)); | 9232 | error->cursor[i].control = I915_READ(CURCNTR(i)); |
8447 | error->cursor[i].position = I915_READ(CURPOS(i)); | 9233 | error->cursor[i].position = I915_READ(CURPOS(i)); |
8448 | error->cursor[i].base = I915_READ(CURBASE(i)); | 9234 | error->cursor[i].base = I915_READ(CURBASE(i)); |
@@ -8457,14 +9243,14 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
8457 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); | 9243 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); |
8458 | } | 9244 | } |
8459 | 9245 | ||
8460 | error->pipe[i].conf = I915_READ(PIPECONF(i)); | 9246 | error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); |
8461 | error->pipe[i].source = I915_READ(PIPESRC(i)); | 9247 | error->pipe[i].source = I915_READ(PIPESRC(i)); |
8462 | error->pipe[i].htotal = I915_READ(HTOTAL(i)); | 9248 | error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); |
8463 | error->pipe[i].hblank = I915_READ(HBLANK(i)); | 9249 | error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); |
8464 | error->pipe[i].hsync = I915_READ(HSYNC(i)); | 9250 | error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); |
8465 | error->pipe[i].vtotal = I915_READ(VTOTAL(i)); | 9251 | error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); |
8466 | error->pipe[i].vblank = I915_READ(VBLANK(i)); | 9252 | error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); |
8467 | error->pipe[i].vsync = I915_READ(VSYNC(i)); | 9253 | error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); |
8468 | } | 9254 | } |
8469 | 9255 | ||
8470 | return error; | 9256 | return error; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 368ed8ef1600..a9ba88a9b1ab 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -36,8 +36,6 @@ | |||
36 | #include <drm/i915_drm.h> | 36 | #include <drm/i915_drm.h> |
37 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
38 | 38 | ||
39 | #define DP_RECEIVER_CAP_SIZE 0xf | ||
40 | #define DP_LINK_STATUS_SIZE 6 | ||
41 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
42 | 40 | ||
43 | /** | 41 | /** |
@@ -49,7 +47,9 @@ | |||
49 | */ | 47 | */ |
50 | static bool is_edp(struct intel_dp *intel_dp) | 48 | static bool is_edp(struct intel_dp *intel_dp) |
51 | { | 49 | { |
52 | return intel_dp->base.type == INTEL_OUTPUT_EDP; | 50 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
51 | |||
52 | return intel_dig_port->base.type == INTEL_OUTPUT_EDP; | ||
53 | } | 53 | } |
54 | 54 | ||
55 | /** | 55 | /** |
@@ -76,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp) | |||
76 | return is_edp(intel_dp) && !is_pch_edp(intel_dp); | 76 | return is_edp(intel_dp) && !is_pch_edp(intel_dp); |
77 | } | 77 | } |
78 | 78 | ||
79 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) | 79 | static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) |
80 | { | 80 | { |
81 | return container_of(encoder, struct intel_dp, base.base); | 81 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
82 | |||
83 | return intel_dig_port->base.base.dev; | ||
82 | } | 84 | } |
83 | 85 | ||
84 | static struct intel_dp *intel_attached_dp(struct drm_connector *connector) | 86 | static struct intel_dp *intel_attached_dp(struct drm_connector *connector) |
85 | { | 87 | { |
86 | return container_of(intel_attached_encoder(connector), | 88 | return enc_to_intel_dp(&intel_attached_encoder(connector)->base); |
87 | struct intel_dp, base); | ||
88 | } | 89 | } |
89 | 90 | ||
90 | /** | 91 | /** |
@@ -106,49 +107,32 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) | |||
106 | return is_pch_edp(intel_dp); | 107 | return is_pch_edp(intel_dp); |
107 | } | 108 | } |
108 | 109 | ||
109 | static void intel_dp_start_link_train(struct intel_dp *intel_dp); | ||
110 | static void intel_dp_complete_link_train(struct intel_dp *intel_dp); | ||
111 | static void intel_dp_link_down(struct intel_dp *intel_dp); | 110 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
112 | 111 | ||
113 | void | 112 | void |
114 | intel_edp_link_config(struct intel_encoder *intel_encoder, | 113 | intel_edp_link_config(struct intel_encoder *intel_encoder, |
115 | int *lane_num, int *link_bw) | 114 | int *lane_num, int *link_bw) |
116 | { | 115 | { |
117 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); | 116 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
118 | 117 | ||
119 | *lane_num = intel_dp->lane_count; | 118 | *lane_num = intel_dp->lane_count; |
120 | if (intel_dp->link_bw == DP_LINK_BW_1_62) | 119 | *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); |
121 | *link_bw = 162000; | ||
122 | else if (intel_dp->link_bw == DP_LINK_BW_2_7) | ||
123 | *link_bw = 270000; | ||
124 | } | 120 | } |
125 | 121 | ||
126 | int | 122 | int |
127 | intel_edp_target_clock(struct intel_encoder *intel_encoder, | 123 | intel_edp_target_clock(struct intel_encoder *intel_encoder, |
128 | struct drm_display_mode *mode) | 124 | struct drm_display_mode *mode) |
129 | { | 125 | { |
130 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); | 126 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
127 | struct intel_connector *intel_connector = intel_dp->attached_connector; | ||
131 | 128 | ||
132 | if (intel_dp->panel_fixed_mode) | 129 | if (intel_connector->panel.fixed_mode) |
133 | return intel_dp->panel_fixed_mode->clock; | 130 | return intel_connector->panel.fixed_mode->clock; |
134 | else | 131 | else |
135 | return mode->clock; | 132 | return mode->clock; |
136 | } | 133 | } |
137 | 134 | ||
138 | static int | 135 | static int |
139 | intel_dp_max_lane_count(struct intel_dp *intel_dp) | ||
140 | { | ||
141 | int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; | ||
142 | switch (max_lane_count) { | ||
143 | case 1: case 2: case 4: | ||
144 | break; | ||
145 | default: | ||
146 | max_lane_count = 4; | ||
147 | } | ||
148 | return max_lane_count; | ||
149 | } | ||
150 | |||
151 | static int | ||
152 | intel_dp_max_link_bw(struct intel_dp *intel_dp) | 136 | intel_dp_max_link_bw(struct intel_dp *intel_dp) |
153 | { | 137 | { |
154 | int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; | 138 | int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; |
@@ -208,7 +192,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp, | |||
208 | bool adjust_mode) | 192 | bool adjust_mode) |
209 | { | 193 | { |
210 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); | 194 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
211 | int max_lanes = intel_dp_max_lane_count(intel_dp); | 195 | int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); |
212 | int max_rate, mode_rate; | 196 | int max_rate, mode_rate; |
213 | 197 | ||
214 | mode_rate = intel_dp_link_required(mode->clock, 24); | 198 | mode_rate = intel_dp_link_required(mode->clock, 24); |
@@ -234,12 +218,14 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
234 | struct drm_display_mode *mode) | 218 | struct drm_display_mode *mode) |
235 | { | 219 | { |
236 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 220 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
221 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
222 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | ||
237 | 223 | ||
238 | if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { | 224 | if (is_edp(intel_dp) && fixed_mode) { |
239 | if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) | 225 | if (mode->hdisplay > fixed_mode->hdisplay) |
240 | return MODE_PANEL; | 226 | return MODE_PANEL; |
241 | 227 | ||
242 | if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) | 228 | if (mode->vdisplay > fixed_mode->vdisplay) |
243 | return MODE_PANEL; | 229 | return MODE_PANEL; |
244 | } | 230 | } |
245 | 231 | ||
@@ -285,6 +271,10 @@ intel_hrawclk(struct drm_device *dev) | |||
285 | struct drm_i915_private *dev_priv = dev->dev_private; | 271 | struct drm_i915_private *dev_priv = dev->dev_private; |
286 | uint32_t clkcfg; | 272 | uint32_t clkcfg; |
287 | 273 | ||
274 | /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ | ||
275 | if (IS_VALLEYVIEW(dev)) | ||
276 | return 200; | ||
277 | |||
288 | clkcfg = I915_READ(CLKCFG); | 278 | clkcfg = I915_READ(CLKCFG); |
289 | switch (clkcfg & CLKCFG_FSB_MASK) { | 279 | switch (clkcfg & CLKCFG_FSB_MASK) { |
290 | case CLKCFG_FSB_400: | 280 | case CLKCFG_FSB_400: |
@@ -310,7 +300,7 @@ intel_hrawclk(struct drm_device *dev) | |||
310 | 300 | ||
311 | static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) | 301 | static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) |
312 | { | 302 | { |
313 | struct drm_device *dev = intel_dp->base.base.dev; | 303 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
314 | struct drm_i915_private *dev_priv = dev->dev_private; | 304 | struct drm_i915_private *dev_priv = dev->dev_private; |
315 | 305 | ||
316 | return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; | 306 | return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; |
@@ -318,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) | |||
318 | 308 | ||
319 | static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) | 309 | static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) |
320 | { | 310 | { |
321 | struct drm_device *dev = intel_dp->base.base.dev; | 311 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
322 | struct drm_i915_private *dev_priv = dev->dev_private; | 312 | struct drm_i915_private *dev_priv = dev->dev_private; |
323 | 313 | ||
324 | return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; | 314 | return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; |
@@ -327,7 +317,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) | |||
327 | static void | 317 | static void |
328 | intel_dp_check_edp(struct intel_dp *intel_dp) | 318 | intel_dp_check_edp(struct intel_dp *intel_dp) |
329 | { | 319 | { |
330 | struct drm_device *dev = intel_dp->base.base.dev; | 320 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
331 | struct drm_i915_private *dev_priv = dev->dev_private; | 321 | struct drm_i915_private *dev_priv = dev->dev_private; |
332 | 322 | ||
333 | if (!is_edp(intel_dp)) | 323 | if (!is_edp(intel_dp)) |
@@ -346,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
346 | uint8_t *recv, int recv_size) | 336 | uint8_t *recv, int recv_size) |
347 | { | 337 | { |
348 | uint32_t output_reg = intel_dp->output_reg; | 338 | uint32_t output_reg = intel_dp->output_reg; |
349 | struct drm_device *dev = intel_dp->base.base.dev; | 339 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
340 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
350 | struct drm_i915_private *dev_priv = dev->dev_private; | 341 | struct drm_i915_private *dev_priv = dev->dev_private; |
351 | uint32_t ch_ctl = output_reg + 0x10; | 342 | uint32_t ch_ctl = output_reg + 0x10; |
352 | uint32_t ch_data = ch_ctl + 4; | 343 | uint32_t ch_data = ch_ctl + 4; |
@@ -356,6 +347,29 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
356 | uint32_t aux_clock_divider; | 347 | uint32_t aux_clock_divider; |
357 | int try, precharge; | 348 | int try, precharge; |
358 | 349 | ||
350 | if (IS_HASWELL(dev)) { | ||
351 | switch (intel_dig_port->port) { | ||
352 | case PORT_A: | ||
353 | ch_ctl = DPA_AUX_CH_CTL; | ||
354 | ch_data = DPA_AUX_CH_DATA1; | ||
355 | break; | ||
356 | case PORT_B: | ||
357 | ch_ctl = PCH_DPB_AUX_CH_CTL; | ||
358 | ch_data = PCH_DPB_AUX_CH_DATA1; | ||
359 | break; | ||
360 | case PORT_C: | ||
361 | ch_ctl = PCH_DPC_AUX_CH_CTL; | ||
362 | ch_data = PCH_DPC_AUX_CH_DATA1; | ||
363 | break; | ||
364 | case PORT_D: | ||
365 | ch_ctl = PCH_DPD_AUX_CH_CTL; | ||
366 | ch_data = PCH_DPD_AUX_CH_DATA1; | ||
367 | break; | ||
368 | default: | ||
369 | BUG(); | ||
370 | } | ||
371 | } | ||
372 | |||
359 | intel_dp_check_edp(intel_dp); | 373 | intel_dp_check_edp(intel_dp); |
360 | /* The clock divider is based off the hrawclk, | 374 | /* The clock divider is based off the hrawclk, |
361 | * and would like to run at 2MHz. So, take the | 375 | * and would like to run at 2MHz. So, take the |
@@ -365,12 +379,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
365 | * clock divider. | 379 | * clock divider. |
366 | */ | 380 | */ |
367 | if (is_cpu_edp(intel_dp)) { | 381 | if (is_cpu_edp(intel_dp)) { |
368 | if (IS_GEN6(dev) || IS_GEN7(dev)) | 382 | if (IS_HASWELL(dev)) |
383 | aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; | ||
384 | else if (IS_VALLEYVIEW(dev)) | ||
385 | aux_clock_divider = 100; | ||
386 | else if (IS_GEN6(dev) || IS_GEN7(dev)) | ||
369 | aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ | 387 | aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ |
370 | else | 388 | else |
371 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ | 389 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
372 | } else if (HAS_PCH_SPLIT(dev)) | 390 | } else if (HAS_PCH_SPLIT(dev)) |
373 | aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ | 391 | aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
374 | else | 392 | else |
375 | aux_clock_divider = intel_hrawclk(dev) / 2; | 393 | aux_clock_divider = intel_hrawclk(dev) / 2; |
376 | 394 | ||
@@ -642,9 +660,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
642 | return -EREMOTEIO; | 660 | return -EREMOTEIO; |
643 | } | 661 | } |
644 | 662 | ||
645 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); | ||
646 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); | ||
647 | |||
648 | static int | 663 | static int |
649 | intel_dp_i2c_init(struct intel_dp *intel_dp, | 664 | intel_dp_i2c_init(struct intel_dp *intel_dp, |
650 | struct intel_connector *intel_connector, const char *name) | 665 | struct intel_connector *intel_connector, const char *name) |
@@ -670,22 +685,25 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, | |||
670 | return ret; | 685 | return ret; |
671 | } | 686 | } |
672 | 687 | ||
673 | static bool | 688 | bool |
674 | intel_dp_mode_fixup(struct drm_encoder *encoder, | 689 | intel_dp_mode_fixup(struct drm_encoder *encoder, |
675 | const struct drm_display_mode *mode, | 690 | const struct drm_display_mode *mode, |
676 | struct drm_display_mode *adjusted_mode) | 691 | struct drm_display_mode *adjusted_mode) |
677 | { | 692 | { |
678 | struct drm_device *dev = encoder->dev; | 693 | struct drm_device *dev = encoder->dev; |
679 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 694 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
695 | struct intel_connector *intel_connector = intel_dp->attached_connector; | ||
680 | int lane_count, clock; | 696 | int lane_count, clock; |
681 | int max_lane_count = intel_dp_max_lane_count(intel_dp); | 697 | int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); |
682 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; | 698 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
683 | int bpp, mode_rate; | 699 | int bpp, mode_rate; |
684 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 700 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
685 | 701 | ||
686 | if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { | 702 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
687 | intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); | 703 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
688 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, | 704 | adjusted_mode); |
705 | intel_pch_panel_fitting(dev, | ||
706 | intel_connector->panel.fitting_mode, | ||
689 | mode, adjusted_mode); | 707 | mode, adjusted_mode); |
690 | } | 708 | } |
691 | 709 | ||
@@ -762,21 +780,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
762 | struct drm_display_mode *adjusted_mode) | 780 | struct drm_display_mode *adjusted_mode) |
763 | { | 781 | { |
764 | struct drm_device *dev = crtc->dev; | 782 | struct drm_device *dev = crtc->dev; |
765 | struct intel_encoder *encoder; | 783 | struct intel_encoder *intel_encoder; |
784 | struct intel_dp *intel_dp; | ||
766 | struct drm_i915_private *dev_priv = dev->dev_private; | 785 | struct drm_i915_private *dev_priv = dev->dev_private; |
767 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 786 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
768 | int lane_count = 4; | 787 | int lane_count = 4; |
769 | struct intel_dp_m_n m_n; | 788 | struct intel_dp_m_n m_n; |
770 | int pipe = intel_crtc->pipe; | 789 | int pipe = intel_crtc->pipe; |
790 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | ||
771 | 791 | ||
772 | /* | 792 | /* |
773 | * Find the lane count in the intel_encoder private | 793 | * Find the lane count in the intel_encoder private |
774 | */ | 794 | */ |
775 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 795 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
776 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 796 | intel_dp = enc_to_intel_dp(&intel_encoder->base); |
777 | 797 | ||
778 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || | 798 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
779 | intel_dp->base.type == INTEL_OUTPUT_EDP) | 799 | intel_encoder->type == INTEL_OUTPUT_EDP) |
780 | { | 800 | { |
781 | lane_count = intel_dp->lane_count; | 801 | lane_count = intel_dp->lane_count; |
782 | break; | 802 | break; |
@@ -791,23 +811,46 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
791 | intel_dp_compute_m_n(intel_crtc->bpp, lane_count, | 811 | intel_dp_compute_m_n(intel_crtc->bpp, lane_count, |
792 | mode->clock, adjusted_mode->clock, &m_n); | 812 | mode->clock, adjusted_mode->clock, &m_n); |
793 | 813 | ||
794 | if (HAS_PCH_SPLIT(dev)) { | 814 | if (IS_HASWELL(dev)) { |
795 | I915_WRITE(TRANSDATA_M1(pipe), | 815 | I915_WRITE(PIPE_DATA_M1(cpu_transcoder), |
796 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 816 | TU_SIZE(m_n.tu) | m_n.gmch_m); |
797 | m_n.gmch_m); | 817 | I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); |
818 | I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); | ||
819 | I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); | ||
820 | } else if (HAS_PCH_SPLIT(dev)) { | ||
821 | I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); | ||
798 | I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); | 822 | I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); |
799 | I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); | 823 | I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); |
800 | I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); | 824 | I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); |
825 | } else if (IS_VALLEYVIEW(dev)) { | ||
826 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); | ||
827 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); | ||
828 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); | ||
829 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); | ||
801 | } else { | 830 | } else { |
802 | I915_WRITE(PIPE_GMCH_DATA_M(pipe), | 831 | I915_WRITE(PIPE_GMCH_DATA_M(pipe), |
803 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 832 | TU_SIZE(m_n.tu) | m_n.gmch_m); |
804 | m_n.gmch_m); | ||
805 | I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); | 833 | I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); |
806 | I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); | 834 | I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); |
807 | I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); | 835 | I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); |
808 | } | 836 | } |
809 | } | 837 | } |
810 | 838 | ||
839 | void intel_dp_init_link_config(struct intel_dp *intel_dp) | ||
840 | { | ||
841 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | ||
842 | intel_dp->link_configuration[0] = intel_dp->link_bw; | ||
843 | intel_dp->link_configuration[1] = intel_dp->lane_count; | ||
844 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; | ||
845 | /* | ||
846 | * Check for DPCD version > 1.1 and enhanced framing support | ||
847 | */ | ||
848 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | ||
849 | (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { | ||
850 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
851 | } | ||
852 | } | ||
853 | |||
811 | static void | 854 | static void |
812 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | 855 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
813 | struct drm_display_mode *adjusted_mode) | 856 | struct drm_display_mode *adjusted_mode) |
@@ -815,7 +858,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
815 | struct drm_device *dev = encoder->dev; | 858 | struct drm_device *dev = encoder->dev; |
816 | struct drm_i915_private *dev_priv = dev->dev_private; | 859 | struct drm_i915_private *dev_priv = dev->dev_private; |
817 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 860 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
818 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 861 | struct drm_crtc *crtc = encoder->crtc; |
819 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 862 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
820 | 863 | ||
821 | /* | 864 | /* |
@@ -860,21 +903,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
860 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; | 903 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; |
861 | intel_write_eld(encoder, adjusted_mode); | 904 | intel_write_eld(encoder, adjusted_mode); |
862 | } | 905 | } |
863 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | 906 | |
864 | intel_dp->link_configuration[0] = intel_dp->link_bw; | 907 | intel_dp_init_link_config(intel_dp); |
865 | intel_dp->link_configuration[1] = intel_dp->lane_count; | ||
866 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; | ||
867 | /* | ||
868 | * Check for DPCD version > 1.1 and enhanced framing support | ||
869 | */ | ||
870 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | ||
871 | (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { | ||
872 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
873 | } | ||
874 | 908 | ||
875 | /* Split out the IBX/CPU vs CPT settings */ | 909 | /* Split out the IBX/CPU vs CPT settings */ |
876 | 910 | ||
877 | if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { | 911 | if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { |
878 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 912 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
879 | intel_dp->DP |= DP_SYNC_HS_HIGH; | 913 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
880 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 914 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
@@ -931,7 +965,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp, | |||
931 | u32 mask, | 965 | u32 mask, |
932 | u32 value) | 966 | u32 value) |
933 | { | 967 | { |
934 | struct drm_device *dev = intel_dp->base.base.dev; | 968 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
935 | struct drm_i915_private *dev_priv = dev->dev_private; | 969 | struct drm_i915_private *dev_priv = dev->dev_private; |
936 | 970 | ||
937 | DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", | 971 | DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", |
@@ -978,9 +1012,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) | |||
978 | return control; | 1012 | return control; |
979 | } | 1013 | } |
980 | 1014 | ||
981 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) | 1015 | void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
982 | { | 1016 | { |
983 | struct drm_device *dev = intel_dp->base.base.dev; | 1017 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
984 | struct drm_i915_private *dev_priv = dev->dev_private; | 1018 | struct drm_i915_private *dev_priv = dev->dev_private; |
985 | u32 pp; | 1019 | u32 pp; |
986 | 1020 | ||
@@ -1019,7 +1053,7 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) | |||
1019 | 1053 | ||
1020 | static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) | 1054 | static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) |
1021 | { | 1055 | { |
1022 | struct drm_device *dev = intel_dp->base.base.dev; | 1056 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1023 | struct drm_i915_private *dev_priv = dev->dev_private; | 1057 | struct drm_i915_private *dev_priv = dev->dev_private; |
1024 | u32 pp; | 1058 | u32 pp; |
1025 | 1059 | ||
@@ -1041,14 +1075,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work) | |||
1041 | { | 1075 | { |
1042 | struct intel_dp *intel_dp = container_of(to_delayed_work(__work), | 1076 | struct intel_dp *intel_dp = container_of(to_delayed_work(__work), |
1043 | struct intel_dp, panel_vdd_work); | 1077 | struct intel_dp, panel_vdd_work); |
1044 | struct drm_device *dev = intel_dp->base.base.dev; | 1078 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1045 | 1079 | ||
1046 | mutex_lock(&dev->mode_config.mutex); | 1080 | mutex_lock(&dev->mode_config.mutex); |
1047 | ironlake_panel_vdd_off_sync(intel_dp); | 1081 | ironlake_panel_vdd_off_sync(intel_dp); |
1048 | mutex_unlock(&dev->mode_config.mutex); | 1082 | mutex_unlock(&dev->mode_config.mutex); |
1049 | } | 1083 | } |
1050 | 1084 | ||
1051 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) | 1085 | void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) |
1052 | { | 1086 | { |
1053 | if (!is_edp(intel_dp)) | 1087 | if (!is_edp(intel_dp)) |
1054 | return; | 1088 | return; |
@@ -1071,9 +1105,9 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) | |||
1071 | } | 1105 | } |
1072 | } | 1106 | } |
1073 | 1107 | ||
1074 | static void ironlake_edp_panel_on(struct intel_dp *intel_dp) | 1108 | void ironlake_edp_panel_on(struct intel_dp *intel_dp) |
1075 | { | 1109 | { |
1076 | struct drm_device *dev = intel_dp->base.base.dev; | 1110 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1077 | struct drm_i915_private *dev_priv = dev->dev_private; | 1111 | struct drm_i915_private *dev_priv = dev->dev_private; |
1078 | u32 pp; | 1112 | u32 pp; |
1079 | 1113 | ||
@@ -1113,9 +1147,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) | |||
1113 | } | 1147 | } |
1114 | } | 1148 | } |
1115 | 1149 | ||
1116 | static void ironlake_edp_panel_off(struct intel_dp *intel_dp) | 1150 | void ironlake_edp_panel_off(struct intel_dp *intel_dp) |
1117 | { | 1151 | { |
1118 | struct drm_device *dev = intel_dp->base.base.dev; | 1152 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1119 | struct drm_i915_private *dev_priv = dev->dev_private; | 1153 | struct drm_i915_private *dev_priv = dev->dev_private; |
1120 | u32 pp; | 1154 | u32 pp; |
1121 | 1155 | ||
@@ -1138,10 +1172,12 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp) | |||
1138 | ironlake_wait_panel_off(intel_dp); | 1172 | ironlake_wait_panel_off(intel_dp); |
1139 | } | 1173 | } |
1140 | 1174 | ||
1141 | static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) | 1175 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
1142 | { | 1176 | { |
1143 | struct drm_device *dev = intel_dp->base.base.dev; | 1177 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1178 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
1144 | struct drm_i915_private *dev_priv = dev->dev_private; | 1179 | struct drm_i915_private *dev_priv = dev->dev_private; |
1180 | int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; | ||
1145 | u32 pp; | 1181 | u32 pp; |
1146 | 1182 | ||
1147 | if (!is_edp(intel_dp)) | 1183 | if (!is_edp(intel_dp)) |
@@ -1159,17 +1195,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) | |||
1159 | pp |= EDP_BLC_ENABLE; | 1195 | pp |= EDP_BLC_ENABLE; |
1160 | I915_WRITE(PCH_PP_CONTROL, pp); | 1196 | I915_WRITE(PCH_PP_CONTROL, pp); |
1161 | POSTING_READ(PCH_PP_CONTROL); | 1197 | POSTING_READ(PCH_PP_CONTROL); |
1198 | |||
1199 | intel_panel_enable_backlight(dev, pipe); | ||
1162 | } | 1200 | } |
1163 | 1201 | ||
1164 | static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) | 1202 | void ironlake_edp_backlight_off(struct intel_dp *intel_dp) |
1165 | { | 1203 | { |
1166 | struct drm_device *dev = intel_dp->base.base.dev; | 1204 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1167 | struct drm_i915_private *dev_priv = dev->dev_private; | 1205 | struct drm_i915_private *dev_priv = dev->dev_private; |
1168 | u32 pp; | 1206 | u32 pp; |
1169 | 1207 | ||
1170 | if (!is_edp(intel_dp)) | 1208 | if (!is_edp(intel_dp)) |
1171 | return; | 1209 | return; |
1172 | 1210 | ||
1211 | intel_panel_disable_backlight(dev); | ||
1212 | |||
1173 | DRM_DEBUG_KMS("\n"); | 1213 | DRM_DEBUG_KMS("\n"); |
1174 | pp = ironlake_get_pp_control(dev_priv); | 1214 | pp = ironlake_get_pp_control(dev_priv); |
1175 | pp &= ~EDP_BLC_ENABLE; | 1215 | pp &= ~EDP_BLC_ENABLE; |
@@ -1180,8 +1220,9 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) | |||
1180 | 1220 | ||
1181 | static void ironlake_edp_pll_on(struct intel_dp *intel_dp) | 1221 | static void ironlake_edp_pll_on(struct intel_dp *intel_dp) |
1182 | { | 1222 | { |
1183 | struct drm_device *dev = intel_dp->base.base.dev; | 1223 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1184 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 1224 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
1225 | struct drm_device *dev = crtc->dev; | ||
1185 | struct drm_i915_private *dev_priv = dev->dev_private; | 1226 | struct drm_i915_private *dev_priv = dev->dev_private; |
1186 | u32 dpa_ctl; | 1227 | u32 dpa_ctl; |
1187 | 1228 | ||
@@ -1205,8 +1246,9 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp) | |||
1205 | 1246 | ||
1206 | static void ironlake_edp_pll_off(struct intel_dp *intel_dp) | 1247 | static void ironlake_edp_pll_off(struct intel_dp *intel_dp) |
1207 | { | 1248 | { |
1208 | struct drm_device *dev = intel_dp->base.base.dev; | 1249 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1209 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 1250 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
1251 | struct drm_device *dev = crtc->dev; | ||
1210 | struct drm_i915_private *dev_priv = dev->dev_private; | 1252 | struct drm_i915_private *dev_priv = dev->dev_private; |
1211 | u32 dpa_ctl; | 1253 | u32 dpa_ctl; |
1212 | 1254 | ||
@@ -1228,7 +1270,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp) | |||
1228 | } | 1270 | } |
1229 | 1271 | ||
1230 | /* If the sink supports it, try to set the power state appropriately */ | 1272 | /* If the sink supports it, try to set the power state appropriately */ |
1231 | static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | 1273 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) |
1232 | { | 1274 | { |
1233 | int ret, i; | 1275 | int ret, i; |
1234 | 1276 | ||
@@ -1298,9 +1340,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, | |||
1298 | return true; | 1340 | return true; |
1299 | } | 1341 | } |
1300 | } | 1342 | } |
1301 | } | ||
1302 | 1343 | ||
1303 | DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg); | 1344 | DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", |
1345 | intel_dp->output_reg); | ||
1346 | } | ||
1304 | 1347 | ||
1305 | return true; | 1348 | return true; |
1306 | } | 1349 | } |
@@ -1396,38 +1439,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ | |||
1396 | DP_LINK_STATUS_SIZE); | 1439 | DP_LINK_STATUS_SIZE); |
1397 | } | 1440 | } |
1398 | 1441 | ||
1399 | static uint8_t | ||
1400 | intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
1401 | int r) | ||
1402 | { | ||
1403 | return link_status[r - DP_LANE0_1_STATUS]; | ||
1404 | } | ||
1405 | |||
1406 | static uint8_t | ||
1407 | intel_get_adjust_request_voltage(uint8_t adjust_request[2], | ||
1408 | int lane) | ||
1409 | { | ||
1410 | int s = ((lane & 1) ? | ||
1411 | DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : | ||
1412 | DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); | ||
1413 | uint8_t l = adjust_request[lane>>1]; | ||
1414 | |||
1415 | return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; | ||
1416 | } | ||
1417 | |||
1418 | static uint8_t | ||
1419 | intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], | ||
1420 | int lane) | ||
1421 | { | ||
1422 | int s = ((lane & 1) ? | ||
1423 | DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : | ||
1424 | DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); | ||
1425 | uint8_t l = adjust_request[lane>>1]; | ||
1426 | |||
1427 | return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; | ||
1428 | } | ||
1429 | |||
1430 | |||
1431 | #if 0 | 1442 | #if 0 |
1432 | static char *voltage_names[] = { | 1443 | static char *voltage_names[] = { |
1433 | "0.4V", "0.6V", "0.8V", "1.2V" | 1444 | "0.4V", "0.6V", "0.8V", "1.2V" |
@@ -1448,7 +1459,7 @@ static char *link_train_names[] = { | |||
1448 | static uint8_t | 1459 | static uint8_t |
1449 | intel_dp_voltage_max(struct intel_dp *intel_dp) | 1460 | intel_dp_voltage_max(struct intel_dp *intel_dp) |
1450 | { | 1461 | { |
1451 | struct drm_device *dev = intel_dp->base.base.dev; | 1462 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1452 | 1463 | ||
1453 | if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) | 1464 | if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) |
1454 | return DP_TRAIN_VOLTAGE_SWING_800; | 1465 | return DP_TRAIN_VOLTAGE_SWING_800; |
@@ -1461,9 +1472,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) | |||
1461 | static uint8_t | 1472 | static uint8_t |
1462 | intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) | 1473 | intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) |
1463 | { | 1474 | { |
1464 | struct drm_device *dev = intel_dp->base.base.dev; | 1475 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1465 | 1476 | ||
1466 | if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { | 1477 | if (IS_HASWELL(dev)) { |
1478 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | ||
1479 | case DP_TRAIN_VOLTAGE_SWING_400: | ||
1480 | return DP_TRAIN_PRE_EMPHASIS_9_5; | ||
1481 | case DP_TRAIN_VOLTAGE_SWING_600: | ||
1482 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
1483 | case DP_TRAIN_VOLTAGE_SWING_800: | ||
1484 | return DP_TRAIN_PRE_EMPHASIS_3_5; | ||
1485 | case DP_TRAIN_VOLTAGE_SWING_1200: | ||
1486 | default: | ||
1487 | return DP_TRAIN_PRE_EMPHASIS_0; | ||
1488 | } | ||
1489 | } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { | ||
1467 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | 1490 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
1468 | case DP_TRAIN_VOLTAGE_SWING_400: | 1491 | case DP_TRAIN_VOLTAGE_SWING_400: |
1469 | return DP_TRAIN_PRE_EMPHASIS_6; | 1492 | return DP_TRAIN_PRE_EMPHASIS_6; |
@@ -1494,13 +1517,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST | |||
1494 | uint8_t v = 0; | 1517 | uint8_t v = 0; |
1495 | uint8_t p = 0; | 1518 | uint8_t p = 0; |
1496 | int lane; | 1519 | int lane; |
1497 | uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); | ||
1498 | uint8_t voltage_max; | 1520 | uint8_t voltage_max; |
1499 | uint8_t preemph_max; | 1521 | uint8_t preemph_max; |
1500 | 1522 | ||
1501 | for (lane = 0; lane < intel_dp->lane_count; lane++) { | 1523 | for (lane = 0; lane < intel_dp->lane_count; lane++) { |
1502 | uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); | 1524 | uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); |
1503 | uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); | 1525 | uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); |
1504 | 1526 | ||
1505 | if (this_v > v) | 1527 | if (this_v > v) |
1506 | v = this_v; | 1528 | v = this_v; |
@@ -1617,52 +1639,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set) | |||
1617 | } | 1639 | } |
1618 | } | 1640 | } |
1619 | 1641 | ||
1620 | static uint8_t | 1642 | /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ |
1621 | intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], | 1643 | static uint32_t |
1622 | int lane) | 1644 | intel_dp_signal_levels_hsw(uint8_t train_set) |
1623 | { | ||
1624 | int s = (lane & 1) * 4; | ||
1625 | uint8_t l = link_status[lane>>1]; | ||
1626 | |||
1627 | return (l >> s) & 0xf; | ||
1628 | } | ||
1629 | |||
1630 | /* Check for clock recovery is done on all channels */ | ||
1631 | static bool | ||
1632 | intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | ||
1633 | { | 1645 | { |
1634 | int lane; | 1646 | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
1635 | uint8_t lane_status; | 1647 | DP_TRAIN_PRE_EMPHASIS_MASK); |
1636 | 1648 | switch (signal_levels) { | |
1637 | for (lane = 0; lane < lane_count; lane++) { | 1649 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: |
1638 | lane_status = intel_get_lane_status(link_status, lane); | 1650 | return DDI_BUF_EMP_400MV_0DB_HSW; |
1639 | if ((lane_status & DP_LANE_CR_DONE) == 0) | 1651 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1640 | return false; | 1652 | return DDI_BUF_EMP_400MV_3_5DB_HSW; |
1641 | } | 1653 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: |
1642 | return true; | 1654 | return DDI_BUF_EMP_400MV_6DB_HSW; |
1643 | } | 1655 | case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: |
1656 | return DDI_BUF_EMP_400MV_9_5DB_HSW; | ||
1644 | 1657 | ||
1645 | /* Check to see if channel eq is done on all channels */ | 1658 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: |
1646 | #define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ | 1659 | return DDI_BUF_EMP_600MV_0DB_HSW; |
1647 | DP_LANE_CHANNEL_EQ_DONE|\ | 1660 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1648 | DP_LANE_SYMBOL_LOCKED) | 1661 | return DDI_BUF_EMP_600MV_3_5DB_HSW; |
1649 | static bool | 1662 | case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: |
1650 | intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) | 1663 | return DDI_BUF_EMP_600MV_6DB_HSW; |
1651 | { | ||
1652 | uint8_t lane_align; | ||
1653 | uint8_t lane_status; | ||
1654 | int lane; | ||
1655 | 1664 | ||
1656 | lane_align = intel_dp_link_status(link_status, | 1665 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: |
1657 | DP_LANE_ALIGN_STATUS_UPDATED); | 1666 | return DDI_BUF_EMP_800MV_0DB_HSW; |
1658 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) | 1667 | case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: |
1659 | return false; | 1668 | return DDI_BUF_EMP_800MV_3_5DB_HSW; |
1660 | for (lane = 0; lane < intel_dp->lane_count; lane++) { | 1669 | default: |
1661 | lane_status = intel_get_lane_status(link_status, lane); | 1670 | DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" |
1662 | if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) | 1671 | "0x%x\n", signal_levels); |
1663 | return false; | 1672 | return DDI_BUF_EMP_400MV_0DB_HSW; |
1664 | } | 1673 | } |
1665 | return true; | ||
1666 | } | 1674 | } |
1667 | 1675 | ||
1668 | static bool | 1676 | static bool |
@@ -1670,11 +1678,49 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1670 | uint32_t dp_reg_value, | 1678 | uint32_t dp_reg_value, |
1671 | uint8_t dp_train_pat) | 1679 | uint8_t dp_train_pat) |
1672 | { | 1680 | { |
1673 | struct drm_device *dev = intel_dp->base.base.dev; | 1681 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1682 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
1674 | struct drm_i915_private *dev_priv = dev->dev_private; | 1683 | struct drm_i915_private *dev_priv = dev->dev_private; |
1684 | enum port port = intel_dig_port->port; | ||
1675 | int ret; | 1685 | int ret; |
1686 | uint32_t temp; | ||
1676 | 1687 | ||
1677 | if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { | 1688 | if (IS_HASWELL(dev)) { |
1689 | temp = I915_READ(DP_TP_CTL(port)); | ||
1690 | |||
1691 | if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) | ||
1692 | temp |= DP_TP_CTL_SCRAMBLE_DISABLE; | ||
1693 | else | ||
1694 | temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; | ||
1695 | |||
1696 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; | ||
1697 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { | ||
1698 | case DP_TRAINING_PATTERN_DISABLE: | ||
1699 | temp |= DP_TP_CTL_LINK_TRAIN_IDLE; | ||
1700 | I915_WRITE(DP_TP_CTL(port), temp); | ||
1701 | |||
1702 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & | ||
1703 | DP_TP_STATUS_IDLE_DONE), 1)) | ||
1704 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); | ||
1705 | |||
1706 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; | ||
1707 | temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; | ||
1708 | |||
1709 | break; | ||
1710 | case DP_TRAINING_PATTERN_1: | ||
1711 | temp |= DP_TP_CTL_LINK_TRAIN_PAT1; | ||
1712 | break; | ||
1713 | case DP_TRAINING_PATTERN_2: | ||
1714 | temp |= DP_TP_CTL_LINK_TRAIN_PAT2; | ||
1715 | break; | ||
1716 | case DP_TRAINING_PATTERN_3: | ||
1717 | temp |= DP_TP_CTL_LINK_TRAIN_PAT3; | ||
1718 | break; | ||
1719 | } | ||
1720 | I915_WRITE(DP_TP_CTL(port), temp); | ||
1721 | |||
1722 | } else if (HAS_PCH_CPT(dev) && | ||
1723 | (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { | ||
1678 | dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; | 1724 | dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; |
1679 | 1725 | ||
1680 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { | 1726 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
@@ -1734,16 +1780,20 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1734 | } | 1780 | } |
1735 | 1781 | ||
1736 | /* Enable corresponding port and start training pattern 1 */ | 1782 | /* Enable corresponding port and start training pattern 1 */ |
1737 | static void | 1783 | void |
1738 | intel_dp_start_link_train(struct intel_dp *intel_dp) | 1784 | intel_dp_start_link_train(struct intel_dp *intel_dp) |
1739 | { | 1785 | { |
1740 | struct drm_device *dev = intel_dp->base.base.dev; | 1786 | struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; |
1787 | struct drm_device *dev = encoder->dev; | ||
1741 | int i; | 1788 | int i; |
1742 | uint8_t voltage; | 1789 | uint8_t voltage; |
1743 | bool clock_recovery = false; | 1790 | bool clock_recovery = false; |
1744 | int voltage_tries, loop_tries; | 1791 | int voltage_tries, loop_tries; |
1745 | uint32_t DP = intel_dp->DP; | 1792 | uint32_t DP = intel_dp->DP; |
1746 | 1793 | ||
1794 | if (IS_HASWELL(dev)) | ||
1795 | intel_ddi_prepare_link_retrain(encoder); | ||
1796 | |||
1747 | /* Write the link configuration data */ | 1797 | /* Write the link configuration data */ |
1748 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1798 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
1749 | intel_dp->link_configuration, | 1799 | intel_dp->link_configuration, |
@@ -1761,8 +1811,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1761 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 1811 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
1762 | uint32_t signal_levels; | 1812 | uint32_t signal_levels; |
1763 | 1813 | ||
1764 | 1814 | if (IS_HASWELL(dev)) { | |
1765 | if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { | 1815 | signal_levels = intel_dp_signal_levels_hsw( |
1816 | intel_dp->train_set[0]); | ||
1817 | DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; | ||
1818 | } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { | ||
1766 | signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); | 1819 | signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); |
1767 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; | 1820 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; |
1768 | } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { | 1821 | } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { |
@@ -1770,23 +1823,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1770 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1823 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1771 | } else { | 1824 | } else { |
1772 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); | 1825 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); |
1773 | DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); | ||
1774 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1826 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1775 | } | 1827 | } |
1828 | DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", | ||
1829 | signal_levels); | ||
1776 | 1830 | ||
1831 | /* Set training pattern 1 */ | ||
1777 | if (!intel_dp_set_link_train(intel_dp, DP, | 1832 | if (!intel_dp_set_link_train(intel_dp, DP, |
1778 | DP_TRAINING_PATTERN_1 | | 1833 | DP_TRAINING_PATTERN_1 | |
1779 | DP_LINK_SCRAMBLING_DISABLE)) | 1834 | DP_LINK_SCRAMBLING_DISABLE)) |
1780 | break; | 1835 | break; |
1781 | /* Set training pattern 1 */ | ||
1782 | 1836 | ||
1783 | udelay(100); | 1837 | drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); |
1784 | if (!intel_dp_get_link_status(intel_dp, link_status)) { | 1838 | if (!intel_dp_get_link_status(intel_dp, link_status)) { |
1785 | DRM_ERROR("failed to get link status\n"); | 1839 | DRM_ERROR("failed to get link status\n"); |
1786 | break; | 1840 | break; |
1787 | } | 1841 | } |
1788 | 1842 | ||
1789 | if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { | 1843 | if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
1790 | DRM_DEBUG_KMS("clock recovery OK\n"); | 1844 | DRM_DEBUG_KMS("clock recovery OK\n"); |
1791 | clock_recovery = true; | 1845 | clock_recovery = true; |
1792 | break; | 1846 | break; |
@@ -1825,10 +1879,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1825 | intel_dp->DP = DP; | 1879 | intel_dp->DP = DP; |
1826 | } | 1880 | } |
1827 | 1881 | ||
1828 | static void | 1882 | void |
1829 | intel_dp_complete_link_train(struct intel_dp *intel_dp) | 1883 | intel_dp_complete_link_train(struct intel_dp *intel_dp) |
1830 | { | 1884 | { |
1831 | struct drm_device *dev = intel_dp->base.base.dev; | 1885 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1832 | bool channel_eq = false; | 1886 | bool channel_eq = false; |
1833 | int tries, cr_tries; | 1887 | int tries, cr_tries; |
1834 | uint32_t DP = intel_dp->DP; | 1888 | uint32_t DP = intel_dp->DP; |
@@ -1848,7 +1902,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1848 | break; | 1902 | break; |
1849 | } | 1903 | } |
1850 | 1904 | ||
1851 | if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { | 1905 | if (IS_HASWELL(dev)) { |
1906 | signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); | ||
1907 | DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; | ||
1908 | } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { | ||
1852 | signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); | 1909 | signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); |
1853 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; | 1910 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; |
1854 | } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { | 1911 | } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { |
@@ -1865,18 +1922,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1865 | DP_LINK_SCRAMBLING_DISABLE)) | 1922 | DP_LINK_SCRAMBLING_DISABLE)) |
1866 | break; | 1923 | break; |
1867 | 1924 | ||
1868 | udelay(400); | 1925 | drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); |
1869 | if (!intel_dp_get_link_status(intel_dp, link_status)) | 1926 | if (!intel_dp_get_link_status(intel_dp, link_status)) |
1870 | break; | 1927 | break; |
1871 | 1928 | ||
1872 | /* Make sure clock is still ok */ | 1929 | /* Make sure clock is still ok */ |
1873 | if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { | 1930 | if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
1874 | intel_dp_start_link_train(intel_dp); | 1931 | intel_dp_start_link_train(intel_dp); |
1875 | cr_tries++; | 1932 | cr_tries++; |
1876 | continue; | 1933 | continue; |
1877 | } | 1934 | } |
1878 | 1935 | ||
1879 | if (intel_channel_eq_ok(intel_dp, link_status)) { | 1936 | if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { |
1880 | channel_eq = true; | 1937 | channel_eq = true; |
1881 | break; | 1938 | break; |
1882 | } | 1939 | } |
@@ -1895,16 +1952,38 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1895 | ++tries; | 1952 | ++tries; |
1896 | } | 1953 | } |
1897 | 1954 | ||
1955 | if (channel_eq) | ||
1956 | DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); | ||
1957 | |||
1898 | intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); | 1958 | intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); |
1899 | } | 1959 | } |
1900 | 1960 | ||
1901 | static void | 1961 | static void |
1902 | intel_dp_link_down(struct intel_dp *intel_dp) | 1962 | intel_dp_link_down(struct intel_dp *intel_dp) |
1903 | { | 1963 | { |
1904 | struct drm_device *dev = intel_dp->base.base.dev; | 1964 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1965 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
1905 | struct drm_i915_private *dev_priv = dev->dev_private; | 1966 | struct drm_i915_private *dev_priv = dev->dev_private; |
1906 | uint32_t DP = intel_dp->DP; | 1967 | uint32_t DP = intel_dp->DP; |
1907 | 1968 | ||
1969 | /* | ||
1970 | * DDI code has a strict mode set sequence and we should try to respect | ||
1971 | * it, otherwise we might hang the machine in many different ways. So we | ||
1972 | * really should be disabling the port only on a complete crtc_disable | ||
1973 | * sequence. This function is just called under two conditions on DDI | ||
1974 | * code: | ||
1975 | * - Link train failed while doing crtc_enable, and on this case we | ||
1976 | * really should respect the mode set sequence and wait for a | ||
1977 | * crtc_disable. | ||
1978 | * - Someone turned the monitor off and intel_dp_check_link_status | ||
1979 | * called us. We don't need to disable the whole port on this case, so | ||
1980 | * when someone turns the monitor on again, | ||
1981 | * intel_ddi_prepare_link_retrain will take care of redoing the link | ||
1982 | * train. | ||
1983 | */ | ||
1984 | if (IS_HASWELL(dev)) | ||
1985 | return; | ||
1986 | |||
1908 | if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) | 1987 | if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) |
1909 | return; | 1988 | return; |
1910 | 1989 | ||
@@ -1923,7 +2002,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1923 | 2002 | ||
1924 | if (HAS_PCH_IBX(dev) && | 2003 | if (HAS_PCH_IBX(dev) && |
1925 | I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { | 2004 | I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { |
1926 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 2005 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
1927 | 2006 | ||
1928 | /* Hardware workaround: leaving our transcoder select | 2007 | /* Hardware workaround: leaving our transcoder select |
1929 | * set to transcoder B while it's off will prevent the | 2008 | * set to transcoder B while it's off will prevent the |
@@ -2024,7 +2103,7 @@ static void | |||
2024 | intel_dp_handle_test_request(struct intel_dp *intel_dp) | 2103 | intel_dp_handle_test_request(struct intel_dp *intel_dp) |
2025 | { | 2104 | { |
2026 | /* NAK by default */ | 2105 | /* NAK by default */ |
2027 | intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); | 2106 | intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); |
2028 | } | 2107 | } |
2029 | 2108 | ||
2030 | /* | 2109 | /* |
@@ -2036,16 +2115,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp) | |||
2036 | * 4. Check link status on receipt of hot-plug interrupt | 2115 | * 4. Check link status on receipt of hot-plug interrupt |
2037 | */ | 2116 | */ |
2038 | 2117 | ||
2039 | static void | 2118 | void |
2040 | intel_dp_check_link_status(struct intel_dp *intel_dp) | 2119 | intel_dp_check_link_status(struct intel_dp *intel_dp) |
2041 | { | 2120 | { |
2121 | struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; | ||
2042 | u8 sink_irq_vector; | 2122 | u8 sink_irq_vector; |
2043 | u8 link_status[DP_LINK_STATUS_SIZE]; | 2123 | u8 link_status[DP_LINK_STATUS_SIZE]; |
2044 | 2124 | ||
2045 | if (!intel_dp->base.connectors_active) | 2125 | if (!intel_encoder->connectors_active) |
2046 | return; | 2126 | return; |
2047 | 2127 | ||
2048 | if (WARN_ON(!intel_dp->base.base.crtc)) | 2128 | if (WARN_ON(!intel_encoder->base.crtc)) |
2049 | return; | 2129 | return; |
2050 | 2130 | ||
2051 | /* Try to read receiver status if the link appears to be up */ | 2131 | /* Try to read receiver status if the link appears to be up */ |
@@ -2074,9 +2154,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
2074 | DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); | 2154 | DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); |
2075 | } | 2155 | } |
2076 | 2156 | ||
2077 | if (!intel_channel_eq_ok(intel_dp, link_status)) { | 2157 | if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { |
2078 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", | 2158 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", |
2079 | drm_get_encoder_name(&intel_dp->base.base)); | 2159 | drm_get_encoder_name(&intel_encoder->base)); |
2080 | intel_dp_start_link_train(intel_dp); | 2160 | intel_dp_start_link_train(intel_dp); |
2081 | intel_dp_complete_link_train(intel_dp); | 2161 | intel_dp_complete_link_train(intel_dp); |
2082 | } | 2162 | } |
@@ -2125,11 +2205,12 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) | |||
2125 | static enum drm_connector_status | 2205 | static enum drm_connector_status |
2126 | ironlake_dp_detect(struct intel_dp *intel_dp) | 2206 | ironlake_dp_detect(struct intel_dp *intel_dp) |
2127 | { | 2207 | { |
2208 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
2128 | enum drm_connector_status status; | 2209 | enum drm_connector_status status; |
2129 | 2210 | ||
2130 | /* Can't disconnect eDP, but you can close the lid... */ | 2211 | /* Can't disconnect eDP, but you can close the lid... */ |
2131 | if (is_edp(intel_dp)) { | 2212 | if (is_edp(intel_dp)) { |
2132 | status = intel_panel_detect(intel_dp->base.base.dev); | 2213 | status = intel_panel_detect(dev); |
2133 | if (status == connector_status_unknown) | 2214 | if (status == connector_status_unknown) |
2134 | status = connector_status_connected; | 2215 | status = connector_status_connected; |
2135 | return status; | 2216 | return status; |
@@ -2141,7 +2222,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp) | |||
2141 | static enum drm_connector_status | 2222 | static enum drm_connector_status |
2142 | g4x_dp_detect(struct intel_dp *intel_dp) | 2223 | g4x_dp_detect(struct intel_dp *intel_dp) |
2143 | { | 2224 | { |
2144 | struct drm_device *dev = intel_dp->base.base.dev; | 2225 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
2145 | struct drm_i915_private *dev_priv = dev->dev_private; | 2226 | struct drm_i915_private *dev_priv = dev->dev_private; |
2146 | uint32_t bit; | 2227 | uint32_t bit; |
2147 | 2228 | ||
@@ -2168,44 +2249,45 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
2168 | static struct edid * | 2249 | static struct edid * |
2169 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | 2250 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
2170 | { | 2251 | { |
2171 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2252 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2172 | struct edid *edid; | ||
2173 | int size; | ||
2174 | 2253 | ||
2175 | if (is_edp(intel_dp)) { | 2254 | /* use cached edid if we have one */ |
2176 | if (!intel_dp->edid) | 2255 | if (intel_connector->edid) { |
2256 | struct edid *edid; | ||
2257 | int size; | ||
2258 | |||
2259 | /* invalid edid */ | ||
2260 | if (IS_ERR(intel_connector->edid)) | ||
2177 | return NULL; | 2261 | return NULL; |
2178 | 2262 | ||
2179 | size = (intel_dp->edid->extensions + 1) * EDID_LENGTH; | 2263 | size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; |
2180 | edid = kmalloc(size, GFP_KERNEL); | 2264 | edid = kmalloc(size, GFP_KERNEL); |
2181 | if (!edid) | 2265 | if (!edid) |
2182 | return NULL; | 2266 | return NULL; |
2183 | 2267 | ||
2184 | memcpy(edid, intel_dp->edid, size); | 2268 | memcpy(edid, intel_connector->edid, size); |
2185 | return edid; | 2269 | return edid; |
2186 | } | 2270 | } |
2187 | 2271 | ||
2188 | edid = drm_get_edid(connector, adapter); | 2272 | return drm_get_edid(connector, adapter); |
2189 | return edid; | ||
2190 | } | 2273 | } |
2191 | 2274 | ||
2192 | static int | 2275 | static int |
2193 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) | 2276 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) |
2194 | { | 2277 | { |
2195 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2278 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2196 | int ret; | ||
2197 | 2279 | ||
2198 | if (is_edp(intel_dp)) { | 2280 | /* use cached edid if we have one */ |
2199 | drm_mode_connector_update_edid_property(connector, | 2281 | if (intel_connector->edid) { |
2200 | intel_dp->edid); | 2282 | /* invalid edid */ |
2201 | ret = drm_add_edid_modes(connector, intel_dp->edid); | 2283 | if (IS_ERR(intel_connector->edid)) |
2202 | drm_edid_to_eld(connector, | 2284 | return 0; |
2203 | intel_dp->edid); | 2285 | |
2204 | return intel_dp->edid_mode_count; | 2286 | return intel_connector_update_modes(connector, |
2287 | intel_connector->edid); | ||
2205 | } | 2288 | } |
2206 | 2289 | ||
2207 | ret = intel_ddc_get_modes(connector, adapter); | 2290 | return intel_ddc_get_modes(connector, adapter); |
2208 | return ret; | ||
2209 | } | 2291 | } |
2210 | 2292 | ||
2211 | 2293 | ||
@@ -2219,9 +2301,12 @@ static enum drm_connector_status | |||
2219 | intel_dp_detect(struct drm_connector *connector, bool force) | 2301 | intel_dp_detect(struct drm_connector *connector, bool force) |
2220 | { | 2302 | { |
2221 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2303 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2222 | struct drm_device *dev = intel_dp->base.base.dev; | 2304 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2305 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | ||
2306 | struct drm_device *dev = connector->dev; | ||
2223 | enum drm_connector_status status; | 2307 | enum drm_connector_status status; |
2224 | struct edid *edid = NULL; | 2308 | struct edid *edid = NULL; |
2309 | char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; | ||
2225 | 2310 | ||
2226 | intel_dp->has_audio = false; | 2311 | intel_dp->has_audio = false; |
2227 | 2312 | ||
@@ -2230,10 +2315,9 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
2230 | else | 2315 | else |
2231 | status = g4x_dp_detect(intel_dp); | 2316 | status = g4x_dp_detect(intel_dp); |
2232 | 2317 | ||
2233 | DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", | 2318 | hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), |
2234 | intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], | 2319 | 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); |
2235 | intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], | 2320 | DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); |
2236 | intel_dp->dpcd[6], intel_dp->dpcd[7]); | ||
2237 | 2321 | ||
2238 | if (status != connector_status_connected) | 2322 | if (status != connector_status_connected) |
2239 | return status; | 2323 | return status; |
@@ -2250,49 +2334,31 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
2250 | } | 2334 | } |
2251 | } | 2335 | } |
2252 | 2336 | ||
2337 | if (intel_encoder->type != INTEL_OUTPUT_EDP) | ||
2338 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | ||
2253 | return connector_status_connected; | 2339 | return connector_status_connected; |
2254 | } | 2340 | } |
2255 | 2341 | ||
2256 | static int intel_dp_get_modes(struct drm_connector *connector) | 2342 | static int intel_dp_get_modes(struct drm_connector *connector) |
2257 | { | 2343 | { |
2258 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2344 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2259 | struct drm_device *dev = intel_dp->base.base.dev; | 2345 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2260 | struct drm_i915_private *dev_priv = dev->dev_private; | 2346 | struct drm_device *dev = connector->dev; |
2261 | int ret; | 2347 | int ret; |
2262 | 2348 | ||
2263 | /* We should parse the EDID data and find out if it has an audio sink | 2349 | /* We should parse the EDID data and find out if it has an audio sink |
2264 | */ | 2350 | */ |
2265 | 2351 | ||
2266 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); | 2352 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); |
2267 | if (ret) { | 2353 | if (ret) |
2268 | if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) { | ||
2269 | struct drm_display_mode *newmode; | ||
2270 | list_for_each_entry(newmode, &connector->probed_modes, | ||
2271 | head) { | ||
2272 | if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) { | ||
2273 | intel_dp->panel_fixed_mode = | ||
2274 | drm_mode_duplicate(dev, newmode); | ||
2275 | break; | ||
2276 | } | ||
2277 | } | ||
2278 | } | ||
2279 | return ret; | 2354 | return ret; |
2280 | } | ||
2281 | 2355 | ||
2282 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 2356 | /* if eDP has no EDID, fall back to fixed mode */ |
2283 | if (is_edp(intel_dp)) { | 2357 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
2284 | /* initialize panel mode from VBT if available for eDP */ | 2358 | struct drm_display_mode *mode; |
2285 | if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { | 2359 | mode = drm_mode_duplicate(dev, |
2286 | intel_dp->panel_fixed_mode = | 2360 | intel_connector->panel.fixed_mode); |
2287 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | 2361 | if (mode) { |
2288 | if (intel_dp->panel_fixed_mode) { | ||
2289 | intel_dp->panel_fixed_mode->type |= | ||
2290 | DRM_MODE_TYPE_PREFERRED; | ||
2291 | } | ||
2292 | } | ||
2293 | if (intel_dp->panel_fixed_mode) { | ||
2294 | struct drm_display_mode *mode; | ||
2295 | mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); | ||
2296 | drm_mode_probed_add(connector, mode); | 2362 | drm_mode_probed_add(connector, mode); |
2297 | return 1; | 2363 | return 1; |
2298 | } | 2364 | } |
@@ -2322,7 +2388,9 @@ intel_dp_set_property(struct drm_connector *connector, | |||
2322 | uint64_t val) | 2388 | uint64_t val) |
2323 | { | 2389 | { |
2324 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 2390 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
2325 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2391 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2392 | struct intel_encoder *intel_encoder = intel_attached_encoder(connector); | ||
2393 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); | ||
2326 | int ret; | 2394 | int ret; |
2327 | 2395 | ||
2328 | ret = drm_connector_property_set_value(connector, property, val); | 2396 | ret = drm_connector_property_set_value(connector, property, val); |
@@ -2358,11 +2426,27 @@ intel_dp_set_property(struct drm_connector *connector, | |||
2358 | goto done; | 2426 | goto done; |
2359 | } | 2427 | } |
2360 | 2428 | ||
2429 | if (is_edp(intel_dp) && | ||
2430 | property == connector->dev->mode_config.scaling_mode_property) { | ||
2431 | if (val == DRM_MODE_SCALE_NONE) { | ||
2432 | DRM_DEBUG_KMS("no scaling not supported\n"); | ||
2433 | return -EINVAL; | ||
2434 | } | ||
2435 | |||
2436 | if (intel_connector->panel.fitting_mode == val) { | ||
2437 | /* the eDP scaling property is not changed */ | ||
2438 | return 0; | ||
2439 | } | ||
2440 | intel_connector->panel.fitting_mode = val; | ||
2441 | |||
2442 | goto done; | ||
2443 | } | ||
2444 | |||
2361 | return -EINVAL; | 2445 | return -EINVAL; |
2362 | 2446 | ||
2363 | done: | 2447 | done: |
2364 | if (intel_dp->base.base.crtc) { | 2448 | if (intel_encoder->base.crtc) { |
2365 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 2449 | struct drm_crtc *crtc = intel_encoder->base.crtc; |
2366 | intel_set_mode(crtc, &crtc->mode, | 2450 | intel_set_mode(crtc, &crtc->mode, |
2367 | crtc->x, crtc->y, crtc->fb); | 2451 | crtc->x, crtc->y, crtc->fb); |
2368 | } | 2452 | } |
@@ -2375,27 +2459,33 @@ intel_dp_destroy(struct drm_connector *connector) | |||
2375 | { | 2459 | { |
2376 | struct drm_device *dev = connector->dev; | 2460 | struct drm_device *dev = connector->dev; |
2377 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2461 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2462 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
2378 | 2463 | ||
2379 | if (is_edp(intel_dp)) | 2464 | if (!IS_ERR_OR_NULL(intel_connector->edid)) |
2465 | kfree(intel_connector->edid); | ||
2466 | |||
2467 | if (is_edp(intel_dp)) { | ||
2380 | intel_panel_destroy_backlight(dev); | 2468 | intel_panel_destroy_backlight(dev); |
2469 | intel_panel_fini(&intel_connector->panel); | ||
2470 | } | ||
2381 | 2471 | ||
2382 | drm_sysfs_connector_remove(connector); | 2472 | drm_sysfs_connector_remove(connector); |
2383 | drm_connector_cleanup(connector); | 2473 | drm_connector_cleanup(connector); |
2384 | kfree(connector); | 2474 | kfree(connector); |
2385 | } | 2475 | } |
2386 | 2476 | ||
2387 | static void intel_dp_encoder_destroy(struct drm_encoder *encoder) | 2477 | void intel_dp_encoder_destroy(struct drm_encoder *encoder) |
2388 | { | 2478 | { |
2389 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 2479 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
2480 | struct intel_dp *intel_dp = &intel_dig_port->dp; | ||
2390 | 2481 | ||
2391 | i2c_del_adapter(&intel_dp->adapter); | 2482 | i2c_del_adapter(&intel_dp->adapter); |
2392 | drm_encoder_cleanup(encoder); | 2483 | drm_encoder_cleanup(encoder); |
2393 | if (is_edp(intel_dp)) { | 2484 | if (is_edp(intel_dp)) { |
2394 | kfree(intel_dp->edid); | ||
2395 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 2485 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
2396 | ironlake_panel_vdd_off_sync(intel_dp); | 2486 | ironlake_panel_vdd_off_sync(intel_dp); |
2397 | } | 2487 | } |
2398 | kfree(intel_dp); | 2488 | kfree(intel_dig_port); |
2399 | } | 2489 | } |
2400 | 2490 | ||
2401 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | 2491 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
@@ -2425,7 +2515,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { | |||
2425 | static void | 2515 | static void |
2426 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) | 2516 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) |
2427 | { | 2517 | { |
2428 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); | 2518 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
2429 | 2519 | ||
2430 | intel_dp_check_link_status(intel_dp); | 2520 | intel_dp_check_link_status(intel_dp); |
2431 | } | 2521 | } |
@@ -2435,13 +2525,14 @@ int | |||
2435 | intel_trans_dp_port_sel(struct drm_crtc *crtc) | 2525 | intel_trans_dp_port_sel(struct drm_crtc *crtc) |
2436 | { | 2526 | { |
2437 | struct drm_device *dev = crtc->dev; | 2527 | struct drm_device *dev = crtc->dev; |
2438 | struct intel_encoder *encoder; | 2528 | struct intel_encoder *intel_encoder; |
2529 | struct intel_dp *intel_dp; | ||
2439 | 2530 | ||
2440 | for_each_encoder_on_crtc(dev, crtc, encoder) { | 2531 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
2441 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 2532 | intel_dp = enc_to_intel_dp(&intel_encoder->base); |
2442 | 2533 | ||
2443 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || | 2534 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
2444 | intel_dp->base.type == INTEL_OUTPUT_EDP) | 2535 | intel_encoder->type == INTEL_OUTPUT_EDP) |
2445 | return intel_dp->output_reg; | 2536 | return intel_dp->output_reg; |
2446 | } | 2537 | } |
2447 | 2538 | ||
@@ -2471,78 +2562,191 @@ bool intel_dpd_is_edp(struct drm_device *dev) | |||
2471 | static void | 2562 | static void |
2472 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) | 2563 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) |
2473 | { | 2564 | { |
2565 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
2566 | |||
2474 | intel_attach_force_audio_property(connector); | 2567 | intel_attach_force_audio_property(connector); |
2475 | intel_attach_broadcast_rgb_property(connector); | 2568 | intel_attach_broadcast_rgb_property(connector); |
2569 | |||
2570 | if (is_edp(intel_dp)) { | ||
2571 | drm_mode_create_scaling_mode_property(connector->dev); | ||
2572 | drm_connector_attach_property( | ||
2573 | connector, | ||
2574 | connector->dev->mode_config.scaling_mode_property, | ||
2575 | DRM_MODE_SCALE_ASPECT); | ||
2576 | intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; | ||
2577 | } | ||
2578 | } | ||
2579 | |||
2580 | static void | ||
2581 | intel_dp_init_panel_power_sequencer(struct drm_device *dev, | ||
2582 | struct intel_dp *intel_dp) | ||
2583 | { | ||
2584 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2585 | struct edp_power_seq cur, vbt, spec, final; | ||
2586 | u32 pp_on, pp_off, pp_div, pp; | ||
2587 | |||
2588 | /* Workaround: Need to write PP_CONTROL with the unlock key as | ||
2589 | * the very first thing. */ | ||
2590 | pp = ironlake_get_pp_control(dev_priv); | ||
2591 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
2592 | |||
2593 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | ||
2594 | pp_off = I915_READ(PCH_PP_OFF_DELAYS); | ||
2595 | pp_div = I915_READ(PCH_PP_DIVISOR); | ||
2596 | |||
2597 | /* Pull timing values out of registers */ | ||
2598 | cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> | ||
2599 | PANEL_POWER_UP_DELAY_SHIFT; | ||
2600 | |||
2601 | cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> | ||
2602 | PANEL_LIGHT_ON_DELAY_SHIFT; | ||
2603 | |||
2604 | cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> | ||
2605 | PANEL_LIGHT_OFF_DELAY_SHIFT; | ||
2606 | |||
2607 | cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> | ||
2608 | PANEL_POWER_DOWN_DELAY_SHIFT; | ||
2609 | |||
2610 | cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> | ||
2611 | PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; | ||
2612 | |||
2613 | DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | ||
2614 | cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); | ||
2615 | |||
2616 | vbt = dev_priv->edp.pps; | ||
2617 | |||
2618 | /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of | ||
2619 | * our hw here, which are all in 100usec. */ | ||
2620 | spec.t1_t3 = 210 * 10; | ||
2621 | spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ | ||
2622 | spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ | ||
2623 | spec.t10 = 500 * 10; | ||
2624 | /* This one is special and actually in units of 100ms, but zero | ||
2625 | * based in the hw (so we need to add 100 ms). But the sw vbt | ||
2626 | * table multiplies it with 1000 to make it in units of 100usec, | ||
2627 | * too. */ | ||
2628 | spec.t11_t12 = (510 + 100) * 10; | ||
2629 | |||
2630 | DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | ||
2631 | vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); | ||
2632 | |||
2633 | /* Use the max of the register settings and vbt. If both are | ||
2634 | * unset, fall back to the spec limits. */ | ||
2635 | #define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ | ||
2636 | spec.field : \ | ||
2637 | max(cur.field, vbt.field)) | ||
2638 | assign_final(t1_t3); | ||
2639 | assign_final(t8); | ||
2640 | assign_final(t9); | ||
2641 | assign_final(t10); | ||
2642 | assign_final(t11_t12); | ||
2643 | #undef assign_final | ||
2644 | |||
2645 | #define get_delay(field) (DIV_ROUND_UP(final.field, 10)) | ||
2646 | intel_dp->panel_power_up_delay = get_delay(t1_t3); | ||
2647 | intel_dp->backlight_on_delay = get_delay(t8); | ||
2648 | intel_dp->backlight_off_delay = get_delay(t9); | ||
2649 | intel_dp->panel_power_down_delay = get_delay(t10); | ||
2650 | intel_dp->panel_power_cycle_delay = get_delay(t11_t12); | ||
2651 | #undef get_delay | ||
2652 | |||
2653 | /* And finally store the new values in the power sequencer. */ | ||
2654 | pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | | ||
2655 | (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); | ||
2656 | pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | | ||
2657 | (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); | ||
2658 | /* Compute the divisor for the pp clock, simply match the Bspec | ||
2659 | * formula. */ | ||
2660 | pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) | ||
2661 | << PP_REFERENCE_DIVIDER_SHIFT; | ||
2662 | pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) | ||
2663 | << PANEL_POWER_CYCLE_DELAY_SHIFT); | ||
2664 | |||
2665 | /* Haswell doesn't have any port selection bits for the panel | ||
2666 | * power sequencer any more. */ | ||
2667 | if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { | ||
2668 | if (is_cpu_edp(intel_dp)) | ||
2669 | pp_on |= PANEL_POWER_PORT_DP_A; | ||
2670 | else | ||
2671 | pp_on |= PANEL_POWER_PORT_DP_D; | ||
2672 | } | ||
2673 | |||
2674 | I915_WRITE(PCH_PP_ON_DELAYS, pp_on); | ||
2675 | I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); | ||
2676 | I915_WRITE(PCH_PP_DIVISOR, pp_div); | ||
2677 | |||
2678 | |||
2679 | DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", | ||
2680 | intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, | ||
2681 | intel_dp->panel_power_cycle_delay); | ||
2682 | |||
2683 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", | ||
2684 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); | ||
2685 | |||
2686 | DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", | ||
2687 | I915_READ(PCH_PP_ON_DELAYS), | ||
2688 | I915_READ(PCH_PP_OFF_DELAYS), | ||
2689 | I915_READ(PCH_PP_DIVISOR)); | ||
2476 | } | 2690 | } |
2477 | 2691 | ||
2478 | void | 2692 | void |
2479 | intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | 2693 | intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
2694 | struct intel_connector *intel_connector) | ||
2480 | { | 2695 | { |
2696 | struct drm_connector *connector = &intel_connector->base; | ||
2697 | struct intel_dp *intel_dp = &intel_dig_port->dp; | ||
2698 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | ||
2699 | struct drm_device *dev = intel_encoder->base.dev; | ||
2481 | struct drm_i915_private *dev_priv = dev->dev_private; | 2700 | struct drm_i915_private *dev_priv = dev->dev_private; |
2482 | struct drm_connector *connector; | 2701 | struct drm_display_mode *fixed_mode = NULL; |
2483 | struct intel_dp *intel_dp; | 2702 | enum port port = intel_dig_port->port; |
2484 | struct intel_encoder *intel_encoder; | ||
2485 | struct intel_connector *intel_connector; | ||
2486 | const char *name = NULL; | 2703 | const char *name = NULL; |
2487 | int type; | 2704 | int type; |
2488 | 2705 | ||
2489 | intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); | ||
2490 | if (!intel_dp) | ||
2491 | return; | ||
2492 | |||
2493 | intel_dp->output_reg = output_reg; | ||
2494 | intel_dp->port = port; | ||
2495 | /* Preserve the current hw state. */ | 2706 | /* Preserve the current hw state. */ |
2496 | intel_dp->DP = I915_READ(intel_dp->output_reg); | 2707 | intel_dp->DP = I915_READ(intel_dp->output_reg); |
2708 | intel_dp->attached_connector = intel_connector; | ||
2497 | 2709 | ||
2498 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 2710 | if (HAS_PCH_SPLIT(dev) && port == PORT_D) |
2499 | if (!intel_connector) { | ||
2500 | kfree(intel_dp); | ||
2501 | return; | ||
2502 | } | ||
2503 | intel_encoder = &intel_dp->base; | ||
2504 | |||
2505 | if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) | ||
2506 | if (intel_dpd_is_edp(dev)) | 2711 | if (intel_dpd_is_edp(dev)) |
2507 | intel_dp->is_pch_edp = true; | 2712 | intel_dp->is_pch_edp = true; |
2508 | 2713 | ||
2509 | if (output_reg == DP_A || is_pch_edp(intel_dp)) { | 2714 | /* |
2715 | * FIXME : We need to initialize built-in panels before external panels. | ||
2716 | * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup | ||
2717 | */ | ||
2718 | if (IS_VALLEYVIEW(dev) && port == PORT_C) { | ||
2719 | type = DRM_MODE_CONNECTOR_eDP; | ||
2720 | intel_encoder->type = INTEL_OUTPUT_EDP; | ||
2721 | } else if (port == PORT_A || is_pch_edp(intel_dp)) { | ||
2510 | type = DRM_MODE_CONNECTOR_eDP; | 2722 | type = DRM_MODE_CONNECTOR_eDP; |
2511 | intel_encoder->type = INTEL_OUTPUT_EDP; | 2723 | intel_encoder->type = INTEL_OUTPUT_EDP; |
2512 | } else { | 2724 | } else { |
2725 | /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for | ||
2726 | * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't | ||
2727 | * rewrite it. | ||
2728 | */ | ||
2513 | type = DRM_MODE_CONNECTOR_DisplayPort; | 2729 | type = DRM_MODE_CONNECTOR_DisplayPort; |
2514 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | ||
2515 | } | 2730 | } |
2516 | 2731 | ||
2517 | connector = &intel_connector->base; | ||
2518 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); | 2732 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); |
2519 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 2733 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
2520 | 2734 | ||
2521 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 2735 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
2522 | |||
2523 | intel_encoder->cloneable = false; | ||
2524 | |||
2525 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, | ||
2526 | ironlake_panel_vdd_work); | ||
2527 | |||
2528 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | ||
2529 | |||
2530 | connector->interlace_allowed = true; | 2736 | connector->interlace_allowed = true; |
2531 | connector->doublescan_allowed = 0; | 2737 | connector->doublescan_allowed = 0; |
2532 | 2738 | ||
2533 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, | 2739 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, |
2534 | DRM_MODE_ENCODER_TMDS); | 2740 | ironlake_panel_vdd_work); |
2535 | drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); | ||
2536 | 2741 | ||
2537 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 2742 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
2538 | drm_sysfs_connector_add(connector); | 2743 | drm_sysfs_connector_add(connector); |
2539 | 2744 | ||
2540 | intel_encoder->enable = intel_enable_dp; | 2745 | if (IS_HASWELL(dev)) |
2541 | intel_encoder->pre_enable = intel_pre_enable_dp; | 2746 | intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
2542 | intel_encoder->disable = intel_disable_dp; | 2747 | else |
2543 | intel_encoder->post_disable = intel_post_disable_dp; | 2748 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
2544 | intel_encoder->get_hw_state = intel_dp_get_hw_state; | 2749 | |
2545 | intel_connector->get_hw_state = intel_connector_get_hw_state; | ||
2546 | 2750 | ||
2547 | /* Set up the DDC bus. */ | 2751 | /* Set up the DDC bus. */ |
2548 | switch (port) { | 2752 | switch (port) { |
@@ -2566,66 +2770,15 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | |||
2566 | break; | 2770 | break; |
2567 | } | 2771 | } |
2568 | 2772 | ||
2569 | /* Cache some DPCD data in the eDP case */ | 2773 | if (is_edp(intel_dp)) |
2570 | if (is_edp(intel_dp)) { | 2774 | intel_dp_init_panel_power_sequencer(dev, intel_dp); |
2571 | struct edp_power_seq cur, vbt; | ||
2572 | u32 pp_on, pp_off, pp_div; | ||
2573 | |||
2574 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | ||
2575 | pp_off = I915_READ(PCH_PP_OFF_DELAYS); | ||
2576 | pp_div = I915_READ(PCH_PP_DIVISOR); | ||
2577 | |||
2578 | if (!pp_on || !pp_off || !pp_div) { | ||
2579 | DRM_INFO("bad panel power sequencing delays, disabling panel\n"); | ||
2580 | intel_dp_encoder_destroy(&intel_dp->base.base); | ||
2581 | intel_dp_destroy(&intel_connector->base); | ||
2582 | return; | ||
2583 | } | ||
2584 | |||
2585 | /* Pull timing values out of registers */ | ||
2586 | cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> | ||
2587 | PANEL_POWER_UP_DELAY_SHIFT; | ||
2588 | |||
2589 | cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> | ||
2590 | PANEL_LIGHT_ON_DELAY_SHIFT; | ||
2591 | |||
2592 | cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> | ||
2593 | PANEL_LIGHT_OFF_DELAY_SHIFT; | ||
2594 | |||
2595 | cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> | ||
2596 | PANEL_POWER_DOWN_DELAY_SHIFT; | ||
2597 | |||
2598 | cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> | ||
2599 | PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; | ||
2600 | |||
2601 | DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | ||
2602 | cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); | ||
2603 | |||
2604 | vbt = dev_priv->edp.pps; | ||
2605 | |||
2606 | DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | ||
2607 | vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); | ||
2608 | |||
2609 | #define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) | ||
2610 | |||
2611 | intel_dp->panel_power_up_delay = get_delay(t1_t3); | ||
2612 | intel_dp->backlight_on_delay = get_delay(t8); | ||
2613 | intel_dp->backlight_off_delay = get_delay(t9); | ||
2614 | intel_dp->panel_power_down_delay = get_delay(t10); | ||
2615 | intel_dp->panel_power_cycle_delay = get_delay(t11_t12); | ||
2616 | |||
2617 | DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", | ||
2618 | intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, | ||
2619 | intel_dp->panel_power_cycle_delay); | ||
2620 | |||
2621 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", | ||
2622 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); | ||
2623 | } | ||
2624 | 2775 | ||
2625 | intel_dp_i2c_init(intel_dp, intel_connector, name); | 2776 | intel_dp_i2c_init(intel_dp, intel_connector, name); |
2626 | 2777 | ||
2778 | /* Cache DPCD and EDID for edp. */ | ||
2627 | if (is_edp(intel_dp)) { | 2779 | if (is_edp(intel_dp)) { |
2628 | bool ret; | 2780 | bool ret; |
2781 | struct drm_display_mode *scan; | ||
2629 | struct edid *edid; | 2782 | struct edid *edid; |
2630 | 2783 | ||
2631 | ironlake_edp_panel_vdd_on(intel_dp); | 2784 | ironlake_edp_panel_vdd_on(intel_dp); |
@@ -2640,29 +2793,47 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | |||
2640 | } else { | 2793 | } else { |
2641 | /* if this fails, presume the device is a ghost */ | 2794 | /* if this fails, presume the device is a ghost */ |
2642 | DRM_INFO("failed to retrieve link info, disabling eDP\n"); | 2795 | DRM_INFO("failed to retrieve link info, disabling eDP\n"); |
2643 | intel_dp_encoder_destroy(&intel_dp->base.base); | 2796 | intel_dp_encoder_destroy(&intel_encoder->base); |
2644 | intel_dp_destroy(&intel_connector->base); | 2797 | intel_dp_destroy(connector); |
2645 | return; | 2798 | return; |
2646 | } | 2799 | } |
2647 | 2800 | ||
2648 | ironlake_edp_panel_vdd_on(intel_dp); | 2801 | ironlake_edp_panel_vdd_on(intel_dp); |
2649 | edid = drm_get_edid(connector, &intel_dp->adapter); | 2802 | edid = drm_get_edid(connector, &intel_dp->adapter); |
2650 | if (edid) { | 2803 | if (edid) { |
2651 | drm_mode_connector_update_edid_property(connector, | 2804 | if (drm_add_edid_modes(connector, edid)) { |
2652 | edid); | 2805 | drm_mode_connector_update_edid_property(connector, edid); |
2653 | intel_dp->edid_mode_count = | 2806 | drm_edid_to_eld(connector, edid); |
2654 | drm_add_edid_modes(connector, edid); | 2807 | } else { |
2655 | drm_edid_to_eld(connector, edid); | 2808 | kfree(edid); |
2656 | intel_dp->edid = edid; | 2809 | edid = ERR_PTR(-EINVAL); |
2810 | } | ||
2811 | } else { | ||
2812 | edid = ERR_PTR(-ENOENT); | ||
2813 | } | ||
2814 | intel_connector->edid = edid; | ||
2815 | |||
2816 | /* prefer fixed mode from EDID if available */ | ||
2817 | list_for_each_entry(scan, &connector->probed_modes, head) { | ||
2818 | if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { | ||
2819 | fixed_mode = drm_mode_duplicate(dev, scan); | ||
2820 | break; | ||
2821 | } | ||
2657 | } | 2822 | } |
2823 | |||
2824 | /* fallback to VBT if available for eDP */ | ||
2825 | if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) { | ||
2826 | fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | ||
2827 | if (fixed_mode) | ||
2828 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
2829 | } | ||
2830 | |||
2658 | ironlake_edp_panel_vdd_off(intel_dp, false); | 2831 | ironlake_edp_panel_vdd_off(intel_dp, false); |
2659 | } | 2832 | } |
2660 | 2833 | ||
2661 | intel_encoder->hot_plug = intel_dp_hot_plug; | ||
2662 | |||
2663 | if (is_edp(intel_dp)) { | 2834 | if (is_edp(intel_dp)) { |
2664 | dev_priv->int_edp_connector = connector; | 2835 | intel_panel_init(&intel_connector->panel, fixed_mode); |
2665 | intel_panel_setup_backlight(dev); | 2836 | intel_panel_setup_backlight(connector); |
2666 | } | 2837 | } |
2667 | 2838 | ||
2668 | intel_dp_add_properties(intel_dp, connector); | 2839 | intel_dp_add_properties(intel_dp, connector); |
@@ -2676,3 +2847,45 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | |||
2676 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); | 2847 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); |
2677 | } | 2848 | } |
2678 | } | 2849 | } |
2850 | |||
2851 | void | ||
2852 | intel_dp_init(struct drm_device *dev, int output_reg, enum port port) | ||
2853 | { | ||
2854 | struct intel_digital_port *intel_dig_port; | ||
2855 | struct intel_encoder *intel_encoder; | ||
2856 | struct drm_encoder *encoder; | ||
2857 | struct intel_connector *intel_connector; | ||
2858 | |||
2859 | intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); | ||
2860 | if (!intel_dig_port) | ||
2861 | return; | ||
2862 | |||
2863 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | ||
2864 | if (!intel_connector) { | ||
2865 | kfree(intel_dig_port); | ||
2866 | return; | ||
2867 | } | ||
2868 | |||
2869 | intel_encoder = &intel_dig_port->base; | ||
2870 | encoder = &intel_encoder->base; | ||
2871 | |||
2872 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, | ||
2873 | DRM_MODE_ENCODER_TMDS); | ||
2874 | drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); | ||
2875 | |||
2876 | intel_encoder->enable = intel_enable_dp; | ||
2877 | intel_encoder->pre_enable = intel_pre_enable_dp; | ||
2878 | intel_encoder->disable = intel_disable_dp; | ||
2879 | intel_encoder->post_disable = intel_post_disable_dp; | ||
2880 | intel_encoder->get_hw_state = intel_dp_get_hw_state; | ||
2881 | |||
2882 | intel_dig_port->port = port; | ||
2883 | intel_dig_port->dp.output_reg = output_reg; | ||
2884 | |||
2885 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | ||
2886 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | ||
2887 | intel_encoder->cloneable = false; | ||
2888 | intel_encoder->hot_plug = intel_dp_hot_plug; | ||
2889 | |||
2890 | intel_dp_init_connector(intel_dig_port, intel_connector); | ||
2891 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index fe7142502f43..bcc52412810f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -94,6 +94,7 @@ | |||
94 | #define INTEL_OUTPUT_HDMI 6 | 94 | #define INTEL_OUTPUT_HDMI 6 |
95 | #define INTEL_OUTPUT_DISPLAYPORT 7 | 95 | #define INTEL_OUTPUT_DISPLAYPORT 7 |
96 | #define INTEL_OUTPUT_EDP 8 | 96 | #define INTEL_OUTPUT_EDP 8 |
97 | #define INTEL_OUTPUT_UNKNOWN 9 | ||
97 | 98 | ||
98 | #define INTEL_DVO_CHIP_NONE 0 | 99 | #define INTEL_DVO_CHIP_NONE 0 |
99 | #define INTEL_DVO_CHIP_LVDS 1 | 100 | #define INTEL_DVO_CHIP_LVDS 1 |
@@ -163,6 +164,11 @@ struct intel_encoder { | |||
163 | int crtc_mask; | 164 | int crtc_mask; |
164 | }; | 165 | }; |
165 | 166 | ||
167 | struct intel_panel { | ||
168 | struct drm_display_mode *fixed_mode; | ||
169 | int fitting_mode; | ||
170 | }; | ||
171 | |||
166 | struct intel_connector { | 172 | struct intel_connector { |
167 | struct drm_connector base; | 173 | struct drm_connector base; |
168 | /* | 174 | /* |
@@ -179,12 +185,19 @@ struct intel_connector { | |||
179 | /* Reads out the current hw, returning true if the connector is enabled | 185 | /* Reads out the current hw, returning true if the connector is enabled |
180 | * and active (i.e. dpms ON state). */ | 186 | * and active (i.e. dpms ON state). */ |
181 | bool (*get_hw_state)(struct intel_connector *); | 187 | bool (*get_hw_state)(struct intel_connector *); |
188 | |||
189 | /* Panel info for eDP and LVDS */ | ||
190 | struct intel_panel panel; | ||
191 | |||
192 | /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ | ||
193 | struct edid *edid; | ||
182 | }; | 194 | }; |
183 | 195 | ||
184 | struct intel_crtc { | 196 | struct intel_crtc { |
185 | struct drm_crtc base; | 197 | struct drm_crtc base; |
186 | enum pipe pipe; | 198 | enum pipe pipe; |
187 | enum plane plane; | 199 | enum plane plane; |
200 | enum transcoder cpu_transcoder; | ||
188 | u8 lut_r[256], lut_g[256], lut_b[256]; | 201 | u8 lut_r[256], lut_g[256], lut_b[256]; |
189 | /* | 202 | /* |
190 | * Whether the crtc and the connected output pipeline is active. Implies | 203 | * Whether the crtc and the connected output pipeline is active. Implies |
@@ -212,12 +225,14 @@ struct intel_crtc { | |||
212 | 225 | ||
213 | /* We can share PLLs across outputs if the timings match */ | 226 | /* We can share PLLs across outputs if the timings match */ |
214 | struct intel_pch_pll *pch_pll; | 227 | struct intel_pch_pll *pch_pll; |
228 | uint32_t ddi_pll_sel; | ||
215 | }; | 229 | }; |
216 | 230 | ||
217 | struct intel_plane { | 231 | struct intel_plane { |
218 | struct drm_plane base; | 232 | struct drm_plane base; |
219 | enum pipe pipe; | 233 | enum pipe pipe; |
220 | struct drm_i915_gem_object *obj; | 234 | struct drm_i915_gem_object *obj; |
235 | bool can_scale; | ||
221 | int max_downscale; | 236 | int max_downscale; |
222 | u32 lut_r[1024], lut_g[1024], lut_b[1024]; | 237 | u32 lut_r[1024], lut_g[1024], lut_b[1024]; |
223 | void (*update_plane)(struct drm_plane *plane, | 238 | void (*update_plane)(struct drm_plane *plane, |
@@ -317,10 +332,8 @@ struct dip_infoframe { | |||
317 | } __attribute__((packed)); | 332 | } __attribute__((packed)); |
318 | 333 | ||
319 | struct intel_hdmi { | 334 | struct intel_hdmi { |
320 | struct intel_encoder base; | ||
321 | u32 sdvox_reg; | 335 | u32 sdvox_reg; |
322 | int ddc_bus; | 336 | int ddc_bus; |
323 | int ddi_port; | ||
324 | uint32_t color_range; | 337 | uint32_t color_range; |
325 | bool has_hdmi_sink; | 338 | bool has_hdmi_sink; |
326 | bool has_audio; | 339 | bool has_audio; |
@@ -331,18 +344,15 @@ struct intel_hdmi { | |||
331 | struct drm_display_mode *adjusted_mode); | 344 | struct drm_display_mode *adjusted_mode); |
332 | }; | 345 | }; |
333 | 346 | ||
334 | #define DP_RECEIVER_CAP_SIZE 0xf | ||
335 | #define DP_MAX_DOWNSTREAM_PORTS 0x10 | 347 | #define DP_MAX_DOWNSTREAM_PORTS 0x10 |
336 | #define DP_LINK_CONFIGURATION_SIZE 9 | 348 | #define DP_LINK_CONFIGURATION_SIZE 9 |
337 | 349 | ||
338 | struct intel_dp { | 350 | struct intel_dp { |
339 | struct intel_encoder base; | ||
340 | uint32_t output_reg; | 351 | uint32_t output_reg; |
341 | uint32_t DP; | 352 | uint32_t DP; |
342 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | 353 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; |
343 | bool has_audio; | 354 | bool has_audio; |
344 | enum hdmi_force_audio force_audio; | 355 | enum hdmi_force_audio force_audio; |
345 | enum port port; | ||
346 | uint32_t color_range; | 356 | uint32_t color_range; |
347 | uint8_t link_bw; | 357 | uint8_t link_bw; |
348 | uint8_t lane_count; | 358 | uint8_t lane_count; |
@@ -357,11 +367,16 @@ struct intel_dp { | |||
357 | int panel_power_cycle_delay; | 367 | int panel_power_cycle_delay; |
358 | int backlight_on_delay; | 368 | int backlight_on_delay; |
359 | int backlight_off_delay; | 369 | int backlight_off_delay; |
360 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ | ||
361 | struct delayed_work panel_vdd_work; | 370 | struct delayed_work panel_vdd_work; |
362 | bool want_panel_vdd; | 371 | bool want_panel_vdd; |
363 | struct edid *edid; /* cached EDID for eDP */ | 372 | struct intel_connector *attached_connector; |
364 | int edid_mode_count; | 373 | }; |
374 | |||
375 | struct intel_digital_port { | ||
376 | struct intel_encoder base; | ||
377 | enum port port; | ||
378 | struct intel_dp dp; | ||
379 | struct intel_hdmi hdmi; | ||
365 | }; | 380 | }; |
366 | 381 | ||
367 | static inline struct drm_crtc * | 382 | static inline struct drm_crtc * |
@@ -395,6 +410,8 @@ struct intel_fbc_work { | |||
395 | int interval; | 410 | int interval; |
396 | }; | 411 | }; |
397 | 412 | ||
413 | int intel_pch_rawclk(struct drm_device *dev); | ||
414 | |||
398 | int intel_connector_update_modes(struct drm_connector *connector, | 415 | int intel_connector_update_modes(struct drm_connector *connector, |
399 | struct edid *edid); | 416 | struct edid *edid); |
400 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | 417 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
@@ -405,7 +422,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) | |||
405 | extern void intel_crt_init(struct drm_device *dev); | 422 | extern void intel_crt_init(struct drm_device *dev); |
406 | extern void intel_hdmi_init(struct drm_device *dev, | 423 | extern void intel_hdmi_init(struct drm_device *dev, |
407 | int sdvox_reg, enum port port); | 424 | int sdvox_reg, enum port port); |
425 | extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | ||
426 | struct intel_connector *intel_connector); | ||
408 | extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); | 427 | extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); |
428 | extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | ||
429 | const struct drm_display_mode *mode, | ||
430 | struct drm_display_mode *adjusted_mode); | ||
409 | extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); | 431 | extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); |
410 | extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, | 432 | extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, |
411 | bool is_sdvob); | 433 | bool is_sdvob); |
@@ -418,10 +440,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); | |||
418 | extern bool intel_lvds_init(struct drm_device *dev); | 440 | extern bool intel_lvds_init(struct drm_device *dev); |
419 | extern void intel_dp_init(struct drm_device *dev, int output_reg, | 441 | extern void intel_dp_init(struct drm_device *dev, int output_reg, |
420 | enum port port); | 442 | enum port port); |
443 | extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | ||
444 | struct intel_connector *intel_connector); | ||
421 | void | 445 | void |
422 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 446 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
423 | struct drm_display_mode *adjusted_mode); | 447 | struct drm_display_mode *adjusted_mode); |
448 | extern void intel_dp_init_link_config(struct intel_dp *intel_dp); | ||
449 | extern void intel_dp_start_link_train(struct intel_dp *intel_dp); | ||
450 | extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); | ||
451 | extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); | ||
452 | extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); | ||
453 | extern void intel_dp_check_link_status(struct intel_dp *intel_dp); | ||
454 | extern bool intel_dp_mode_fixup(struct drm_encoder *encoder, | ||
455 | const struct drm_display_mode *mode, | ||
456 | struct drm_display_mode *adjusted_mode); | ||
424 | extern bool intel_dpd_is_edp(struct drm_device *dev); | 457 | extern bool intel_dpd_is_edp(struct drm_device *dev); |
458 | extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp); | ||
459 | extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp); | ||
460 | extern void ironlake_edp_panel_on(struct intel_dp *intel_dp); | ||
461 | extern void ironlake_edp_panel_off(struct intel_dp *intel_dp); | ||
462 | extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); | ||
463 | extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); | ||
425 | extern void intel_edp_link_config(struct intel_encoder *, int *, int *); | 464 | extern void intel_edp_link_config(struct intel_encoder *, int *, int *); |
426 | extern int intel_edp_target_clock(struct intel_encoder *, | 465 | extern int intel_edp_target_clock(struct intel_encoder *, |
427 | struct drm_display_mode *mode); | 466 | struct drm_display_mode *mode); |
@@ -431,6 +470,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, | |||
431 | enum plane plane); | 470 | enum plane plane); |
432 | 471 | ||
433 | /* intel_panel.c */ | 472 | /* intel_panel.c */ |
473 | extern int intel_panel_init(struct intel_panel *panel, | ||
474 | struct drm_display_mode *fixed_mode); | ||
475 | extern void intel_panel_fini(struct intel_panel *panel); | ||
476 | |||
434 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 477 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
435 | struct drm_display_mode *adjusted_mode); | 478 | struct drm_display_mode *adjusted_mode); |
436 | extern void intel_pch_panel_fitting(struct drm_device *dev, | 479 | extern void intel_pch_panel_fitting(struct drm_device *dev, |
@@ -439,7 +482,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, | |||
439 | struct drm_display_mode *adjusted_mode); | 482 | struct drm_display_mode *adjusted_mode); |
440 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); | 483 | extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
441 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); | 484 | extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
442 | extern int intel_panel_setup_backlight(struct drm_device *dev); | 485 | extern int intel_panel_setup_backlight(struct drm_connector *connector); |
443 | extern void intel_panel_enable_backlight(struct drm_device *dev, | 486 | extern void intel_panel_enable_backlight(struct drm_device *dev, |
444 | enum pipe pipe); | 487 | enum pipe pipe); |
445 | extern void intel_panel_disable_backlight(struct drm_device *dev); | 488 | extern void intel_panel_disable_backlight(struct drm_device *dev); |
@@ -473,6 +516,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector | |||
473 | return to_intel_connector(connector)->encoder; | 516 | return to_intel_connector(connector)->encoder; |
474 | } | 517 | } |
475 | 518 | ||
519 | static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) | ||
520 | { | ||
521 | struct intel_digital_port *intel_dig_port = | ||
522 | container_of(encoder, struct intel_digital_port, base.base); | ||
523 | return &intel_dig_port->dp; | ||
524 | } | ||
525 | |||
526 | static inline struct intel_digital_port * | ||
527 | enc_to_dig_port(struct drm_encoder *encoder) | ||
528 | { | ||
529 | return container_of(encoder, struct intel_digital_port, base.base); | ||
530 | } | ||
531 | |||
532 | static inline struct intel_digital_port * | ||
533 | dp_to_dig_port(struct intel_dp *intel_dp) | ||
534 | { | ||
535 | return container_of(intel_dp, struct intel_digital_port, dp); | ||
536 | } | ||
537 | |||
538 | static inline struct intel_digital_port * | ||
539 | hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) | ||
540 | { | ||
541 | return container_of(intel_hdmi, struct intel_digital_port, hdmi); | ||
542 | } | ||
543 | |||
476 | extern void intel_connector_attach_encoder(struct intel_connector *connector, | 544 | extern void intel_connector_attach_encoder(struct intel_connector *connector, |
477 | struct intel_encoder *encoder); | 545 | struct intel_encoder *encoder); |
478 | extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); | 546 | extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); |
@@ -481,6 +549,9 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
481 | struct drm_crtc *crtc); | 549 | struct drm_crtc *crtc); |
482 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 550 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
483 | struct drm_file *file_priv); | 551 | struct drm_file *file_priv); |
552 | extern enum transcoder | ||
553 | intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, | ||
554 | enum pipe pipe); | ||
484 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); | 555 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
485 | extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); | 556 | extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); |
486 | 557 | ||
@@ -550,6 +621,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, | |||
550 | extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, | 621 | extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, |
551 | struct drm_display_mode *mode); | 622 | struct drm_display_mode *mode); |
552 | 623 | ||
624 | extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, | ||
625 | unsigned int bpp, | ||
626 | unsigned int pitch); | ||
627 | |||
553 | extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | 628 | extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, |
554 | struct drm_file *file_priv); | 629 | struct drm_file *file_priv); |
555 | extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, | 630 | extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, |
@@ -573,12 +648,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev); | |||
573 | extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); | 648 | extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); |
574 | extern void ironlake_teardown_rc6(struct drm_device *dev); | 649 | extern void ironlake_teardown_rc6(struct drm_device *dev); |
575 | 650 | ||
576 | extern void intel_enable_ddi(struct intel_encoder *encoder); | ||
577 | extern void intel_disable_ddi(struct intel_encoder *encoder); | ||
578 | extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | 651 | extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, |
579 | enum pipe *pipe); | 652 | enum pipe *pipe); |
580 | extern void intel_ddi_mode_set(struct drm_encoder *encoder, | 653 | extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv); |
581 | struct drm_display_mode *mode, | 654 | extern void intel_ddi_pll_init(struct drm_device *dev); |
582 | struct drm_display_mode *adjusted_mode); | 655 | extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc); |
656 | extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, | ||
657 | enum transcoder cpu_transcoder); | ||
658 | extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); | ||
659 | extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); | ||
660 | extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev); | ||
661 | extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock); | ||
662 | extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); | ||
663 | extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); | ||
664 | extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); | ||
665 | extern bool | ||
666 | intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); | ||
667 | extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); | ||
583 | 668 | ||
584 | #endif /* __INTEL_DRV_H__ */ | 669 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 9ba0aaed7ee8..1dcfd5b6e141 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -36,10 +36,15 @@ | |||
36 | #include <drm/i915_drm.h> | 36 | #include <drm/i915_drm.h> |
37 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
38 | 38 | ||
39 | static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) | ||
40 | { | ||
41 | return hdmi_to_dig_port(intel_hdmi)->base.base.dev; | ||
42 | } | ||
43 | |||
39 | static void | 44 | static void |
40 | assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) | 45 | assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) |
41 | { | 46 | { |
42 | struct drm_device *dev = intel_hdmi->base.base.dev; | 47 | struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); |
43 | struct drm_i915_private *dev_priv = dev->dev_private; | 48 | struct drm_i915_private *dev_priv = dev->dev_private; |
44 | uint32_t enabled_bits; | 49 | uint32_t enabled_bits; |
45 | 50 | ||
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) | |||
51 | 56 | ||
52 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) | 57 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) |
53 | { | 58 | { |
54 | return container_of(encoder, struct intel_hdmi, base.base); | 59 | struct intel_digital_port *intel_dig_port = |
60 | container_of(encoder, struct intel_digital_port, base.base); | ||
61 | return &intel_dig_port->hdmi; | ||
55 | } | 62 | } |
56 | 63 | ||
57 | static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) | 64 | static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) |
58 | { | 65 | { |
59 | return container_of(intel_attached_encoder(connector), | 66 | return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); |
60 | struct intel_hdmi, base); | ||
61 | } | 67 | } |
62 | 68 | ||
63 | void intel_dip_infoframe_csum(struct dip_infoframe *frame) | 69 | void intel_dip_infoframe_csum(struct dip_infoframe *frame) |
@@ -754,16 +760,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector, | |||
754 | return MODE_OK; | 760 | return MODE_OK; |
755 | } | 761 | } |
756 | 762 | ||
757 | static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | 763 | bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, |
758 | const struct drm_display_mode *mode, | 764 | const struct drm_display_mode *mode, |
759 | struct drm_display_mode *adjusted_mode) | 765 | struct drm_display_mode *adjusted_mode) |
760 | { | 766 | { |
761 | return true; | 767 | return true; |
762 | } | 768 | } |
763 | 769 | ||
764 | static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) | 770 | static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) |
765 | { | 771 | { |
766 | struct drm_device *dev = intel_hdmi->base.base.dev; | 772 | struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); |
767 | struct drm_i915_private *dev_priv = dev->dev_private; | 773 | struct drm_i915_private *dev_priv = dev->dev_private; |
768 | uint32_t bit; | 774 | uint32_t bit; |
769 | 775 | ||
@@ -786,6 +792,9 @@ static enum drm_connector_status | |||
786 | intel_hdmi_detect(struct drm_connector *connector, bool force) | 792 | intel_hdmi_detect(struct drm_connector *connector, bool force) |
787 | { | 793 | { |
788 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | 794 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
795 | struct intel_digital_port *intel_dig_port = | ||
796 | hdmi_to_dig_port(intel_hdmi); | ||
797 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | ||
789 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 798 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
790 | struct edid *edid; | 799 | struct edid *edid; |
791 | enum drm_connector_status status = connector_status_disconnected; | 800 | enum drm_connector_status status = connector_status_disconnected; |
@@ -814,6 +823,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
814 | if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) | 823 | if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) |
815 | intel_hdmi->has_audio = | 824 | intel_hdmi->has_audio = |
816 | (intel_hdmi->force_audio == HDMI_AUDIO_ON); | 825 | (intel_hdmi->force_audio == HDMI_AUDIO_ON); |
826 | intel_encoder->type = INTEL_OUTPUT_HDMI; | ||
817 | } | 827 | } |
818 | 828 | ||
819 | return status; | 829 | return status; |
@@ -859,6 +869,8 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
859 | uint64_t val) | 869 | uint64_t val) |
860 | { | 870 | { |
861 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | 871 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
872 | struct intel_digital_port *intel_dig_port = | ||
873 | hdmi_to_dig_port(intel_hdmi); | ||
862 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 874 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
863 | int ret; | 875 | int ret; |
864 | 876 | ||
@@ -898,8 +910,8 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
898 | return -EINVAL; | 910 | return -EINVAL; |
899 | 911 | ||
900 | done: | 912 | done: |
901 | if (intel_hdmi->base.base.crtc) { | 913 | if (intel_dig_port->base.base.crtc) { |
902 | struct drm_crtc *crtc = intel_hdmi->base.base.crtc; | 914 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
903 | intel_set_mode(crtc, &crtc->mode, | 915 | intel_set_mode(crtc, &crtc->mode, |
904 | crtc->x, crtc->y, crtc->fb); | 916 | crtc->x, crtc->y, crtc->fb); |
905 | } | 917 | } |
@@ -914,12 +926,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector) | |||
914 | kfree(connector); | 926 | kfree(connector); |
915 | } | 927 | } |
916 | 928 | ||
917 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = { | ||
918 | .mode_fixup = intel_hdmi_mode_fixup, | ||
919 | .mode_set = intel_ddi_mode_set, | ||
920 | .disable = intel_encoder_noop, | ||
921 | }; | ||
922 | |||
923 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { | 929 | static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { |
924 | .mode_fixup = intel_hdmi_mode_fixup, | 930 | .mode_fixup = intel_hdmi_mode_fixup, |
925 | .mode_set = intel_hdmi_mode_set, | 931 | .mode_set = intel_hdmi_mode_set, |
@@ -951,43 +957,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c | |||
951 | intel_attach_broadcast_rgb_property(connector); | 957 | intel_attach_broadcast_rgb_property(connector); |
952 | } | 958 | } |
953 | 959 | ||
954 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) | 960 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
961 | struct intel_connector *intel_connector) | ||
955 | { | 962 | { |
963 | struct drm_connector *connector = &intel_connector->base; | ||
964 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; | ||
965 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | ||
966 | struct drm_device *dev = intel_encoder->base.dev; | ||
956 | struct drm_i915_private *dev_priv = dev->dev_private; | 967 | struct drm_i915_private *dev_priv = dev->dev_private; |
957 | struct drm_connector *connector; | 968 | enum port port = intel_dig_port->port; |
958 | struct intel_encoder *intel_encoder; | ||
959 | struct intel_connector *intel_connector; | ||
960 | struct intel_hdmi *intel_hdmi; | ||
961 | 969 | ||
962 | intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); | ||
963 | if (!intel_hdmi) | ||
964 | return; | ||
965 | |||
966 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | ||
967 | if (!intel_connector) { | ||
968 | kfree(intel_hdmi); | ||
969 | return; | ||
970 | } | ||
971 | |||
972 | intel_encoder = &intel_hdmi->base; | ||
973 | drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, | ||
974 | DRM_MODE_ENCODER_TMDS); | ||
975 | |||
976 | connector = &intel_connector->base; | ||
977 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 970 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
978 | DRM_MODE_CONNECTOR_HDMIA); | 971 | DRM_MODE_CONNECTOR_HDMIA); |
979 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | 972 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
980 | 973 | ||
981 | intel_encoder->type = INTEL_OUTPUT_HDMI; | ||
982 | |||
983 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 974 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
984 | connector->interlace_allowed = 1; | 975 | connector->interlace_allowed = 1; |
985 | connector->doublescan_allowed = 0; | 976 | connector->doublescan_allowed = 0; |
986 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | ||
987 | |||
988 | intel_encoder->cloneable = false; | ||
989 | 977 | ||
990 | intel_hdmi->ddi_port = port; | ||
991 | switch (port) { | 978 | switch (port) { |
992 | case PORT_B: | 979 | case PORT_B: |
993 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; | 980 | intel_hdmi->ddc_bus = GMBUS_PORT_DPB; |
@@ -1007,8 +994,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) | |||
1007 | BUG(); | 994 | BUG(); |
1008 | } | 995 | } |
1009 | 996 | ||
1010 | intel_hdmi->sdvox_reg = sdvox_reg; | ||
1011 | |||
1012 | if (!HAS_PCH_SPLIT(dev)) { | 997 | if (!HAS_PCH_SPLIT(dev)) { |
1013 | intel_hdmi->write_infoframe = g4x_write_infoframe; | 998 | intel_hdmi->write_infoframe = g4x_write_infoframe; |
1014 | intel_hdmi->set_infoframes = g4x_set_infoframes; | 999 | intel_hdmi->set_infoframes = g4x_set_infoframes; |
@@ -1026,21 +1011,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) | |||
1026 | intel_hdmi->set_infoframes = cpt_set_infoframes; | 1011 | intel_hdmi->set_infoframes = cpt_set_infoframes; |
1027 | } | 1012 | } |
1028 | 1013 | ||
1029 | if (IS_HASWELL(dev)) { | 1014 | if (IS_HASWELL(dev)) |
1030 | intel_encoder->enable = intel_enable_ddi; | 1015 | intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
1031 | intel_encoder->disable = intel_disable_ddi; | 1016 | else |
1032 | intel_encoder->get_hw_state = intel_ddi_get_hw_state; | 1017 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
1033 | drm_encoder_helper_add(&intel_encoder->base, | ||
1034 | &intel_hdmi_helper_funcs_hsw); | ||
1035 | } else { | ||
1036 | intel_encoder->enable = intel_enable_hdmi; | ||
1037 | intel_encoder->disable = intel_disable_hdmi; | ||
1038 | intel_encoder->get_hw_state = intel_hdmi_get_hw_state; | ||
1039 | drm_encoder_helper_add(&intel_encoder->base, | ||
1040 | &intel_hdmi_helper_funcs); | ||
1041 | } | ||
1042 | intel_connector->get_hw_state = intel_connector_get_hw_state; | ||
1043 | |||
1044 | 1018 | ||
1045 | intel_hdmi_add_properties(intel_hdmi, connector); | 1019 | intel_hdmi_add_properties(intel_hdmi, connector); |
1046 | 1020 | ||
@@ -1056,3 +1030,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) | |||
1056 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); | 1030 | I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); |
1057 | } | 1031 | } |
1058 | } | 1032 | } |
1033 | |||
1034 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) | ||
1035 | { | ||
1036 | struct intel_digital_port *intel_dig_port; | ||
1037 | struct intel_encoder *intel_encoder; | ||
1038 | struct drm_encoder *encoder; | ||
1039 | struct intel_connector *intel_connector; | ||
1040 | |||
1041 | intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); | ||
1042 | if (!intel_dig_port) | ||
1043 | return; | ||
1044 | |||
1045 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | ||
1046 | if (!intel_connector) { | ||
1047 | kfree(intel_dig_port); | ||
1048 | return; | ||
1049 | } | ||
1050 | |||
1051 | intel_encoder = &intel_dig_port->base; | ||
1052 | encoder = &intel_encoder->base; | ||
1053 | |||
1054 | drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, | ||
1055 | DRM_MODE_ENCODER_TMDS); | ||
1056 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); | ||
1057 | |||
1058 | intel_encoder->enable = intel_enable_hdmi; | ||
1059 | intel_encoder->disable = intel_disable_hdmi; | ||
1060 | intel_encoder->get_hw_state = intel_hdmi_get_hw_state; | ||
1061 | |||
1062 | intel_encoder->type = INTEL_OUTPUT_HDMI; | ||
1063 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | ||
1064 | intel_encoder->cloneable = false; | ||
1065 | |||
1066 | intel_dig_port->port = port; | ||
1067 | intel_dig_port->hdmi.sdvox_reg = sdvox_reg; | ||
1068 | intel_dig_port->dp.output_reg = 0; | ||
1069 | |||
1070 | intel_hdmi_init_connector(intel_dig_port, intel_connector); | ||
1071 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index c2c6dbc0971c..3ef5af15b812 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -432,7 +432,7 @@ timeout: | |||
432 | I915_WRITE(GMBUS0 + reg_offset, 0); | 432 | I915_WRITE(GMBUS0 + reg_offset, 0); |
433 | 433 | ||
434 | /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ | 434 | /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ |
435 | bus->force_bit = true; | 435 | bus->force_bit = 1; |
436 | ret = i2c_bit_algo.master_xfer(adapter, msgs, num); | 436 | ret = i2c_bit_algo.master_xfer(adapter, msgs, num); |
437 | 437 | ||
438 | out: | 438 | out: |
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
491 | 491 | ||
492 | /* gmbus seems to be broken on i830 */ | 492 | /* gmbus seems to be broken on i830 */ |
493 | if (IS_I830(dev)) | 493 | if (IS_I830(dev)) |
494 | bus->force_bit = true; | 494 | bus->force_bit = 1; |
495 | 495 | ||
496 | intel_gpio_setup(bus, port); | 496 | intel_gpio_setup(bus, port); |
497 | 497 | ||
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) | |||
532 | { | 532 | { |
533 | struct intel_gmbus *bus = to_intel_gmbus(adapter); | 533 | struct intel_gmbus *bus = to_intel_gmbus(adapter); |
534 | 534 | ||
535 | bus->force_bit = force_bit; | 535 | bus->force_bit += force_bit ? 1 : -1; |
536 | DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", | ||
537 | force_bit ? "en" : "dis", adapter->name, | ||
538 | bus->force_bit); | ||
536 | } | 539 | } |
537 | 540 | ||
538 | void intel_teardown_gmbus(struct drm_device *dev) | 541 | void intel_teardown_gmbus(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index edba93b3474b..d8318821f37b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -40,28 +40,30 @@ | |||
40 | #include <linux/acpi.h> | 40 | #include <linux/acpi.h> |
41 | 41 | ||
42 | /* Private structure for the integrated LVDS support */ | 42 | /* Private structure for the integrated LVDS support */ |
43 | struct intel_lvds { | 43 | struct intel_lvds_connector { |
44 | struct intel_encoder base; | 44 | struct intel_connector base; |
45 | 45 | ||
46 | struct edid *edid; | 46 | struct notifier_block lid_notifier; |
47 | }; | ||
48 | |||
49 | struct intel_lvds_encoder { | ||
50 | struct intel_encoder base; | ||
47 | 51 | ||
48 | int fitting_mode; | ||
49 | u32 pfit_control; | 52 | u32 pfit_control; |
50 | u32 pfit_pgm_ratios; | 53 | u32 pfit_pgm_ratios; |
51 | bool pfit_dirty; | 54 | bool pfit_dirty; |
52 | 55 | ||
53 | struct drm_display_mode *fixed_mode; | 56 | struct intel_lvds_connector *attached_connector; |
54 | }; | 57 | }; |
55 | 58 | ||
56 | static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) | 59 | static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) |
57 | { | 60 | { |
58 | return container_of(encoder, struct intel_lvds, base.base); | 61 | return container_of(encoder, struct intel_lvds_encoder, base.base); |
59 | } | 62 | } |
60 | 63 | ||
61 | static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) | 64 | static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector) |
62 | { | 65 | { |
63 | return container_of(intel_attached_encoder(connector), | 66 | return container_of(connector, struct intel_lvds_connector, base.base); |
64 | struct intel_lvds, base); | ||
65 | } | 67 | } |
66 | 68 | ||
67 | static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, | 69 | static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, |
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, | |||
96 | static void intel_enable_lvds(struct intel_encoder *encoder) | 98 | static void intel_enable_lvds(struct intel_encoder *encoder) |
97 | { | 99 | { |
98 | struct drm_device *dev = encoder->base.dev; | 100 | struct drm_device *dev = encoder->base.dev; |
99 | struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); | 101 | struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
100 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 102 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
101 | struct drm_i915_private *dev_priv = dev->dev_private; | 103 | struct drm_i915_private *dev_priv = dev->dev_private; |
102 | u32 ctl_reg, lvds_reg, stat_reg; | 104 | u32 ctl_reg, lvds_reg, stat_reg; |
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) | |||
113 | 115 | ||
114 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); | 116 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); |
115 | 117 | ||
116 | if (intel_lvds->pfit_dirty) { | 118 | if (lvds_encoder->pfit_dirty) { |
117 | /* | 119 | /* |
118 | * Enable automatic panel scaling so that non-native modes | 120 | * Enable automatic panel scaling so that non-native modes |
119 | * fill the screen. The panel fitter should only be | 121 | * fill the screen. The panel fitter should only be |
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder) | |||
121 | * register description and PRM. | 123 | * register description and PRM. |
122 | */ | 124 | */ |
123 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | 125 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", |
124 | intel_lvds->pfit_control, | 126 | lvds_encoder->pfit_control, |
125 | intel_lvds->pfit_pgm_ratios); | 127 | lvds_encoder->pfit_pgm_ratios); |
126 | 128 | ||
127 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | 129 | I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios); |
128 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | 130 | I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control); |
129 | intel_lvds->pfit_dirty = false; | 131 | lvds_encoder->pfit_dirty = false; |
130 | } | 132 | } |
131 | 133 | ||
132 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | 134 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); |
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) | |||
140 | static void intel_disable_lvds(struct intel_encoder *encoder) | 142 | static void intel_disable_lvds(struct intel_encoder *encoder) |
141 | { | 143 | { |
142 | struct drm_device *dev = encoder->base.dev; | 144 | struct drm_device *dev = encoder->base.dev; |
143 | struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); | 145 | struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
144 | struct drm_i915_private *dev_priv = dev->dev_private; | 146 | struct drm_i915_private *dev_priv = dev->dev_private; |
145 | u32 ctl_reg, lvds_reg, stat_reg; | 147 | u32 ctl_reg, lvds_reg, stat_reg; |
146 | 148 | ||
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder) | |||
160 | if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) | 162 | if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) |
161 | DRM_ERROR("timed out waiting for panel to power off\n"); | 163 | DRM_ERROR("timed out waiting for panel to power off\n"); |
162 | 164 | ||
163 | if (intel_lvds->pfit_control) { | 165 | if (lvds_encoder->pfit_control) { |
164 | I915_WRITE(PFIT_CONTROL, 0); | 166 | I915_WRITE(PFIT_CONTROL, 0); |
165 | intel_lvds->pfit_dirty = true; | 167 | lvds_encoder->pfit_dirty = true; |
166 | } | 168 | } |
167 | 169 | ||
168 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); | 170 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); |
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder) | |||
172 | static int intel_lvds_mode_valid(struct drm_connector *connector, | 174 | static int intel_lvds_mode_valid(struct drm_connector *connector, |
173 | struct drm_display_mode *mode) | 175 | struct drm_display_mode *mode) |
174 | { | 176 | { |
175 | struct intel_lvds *intel_lvds = intel_attached_lvds(connector); | 177 | struct intel_connector *intel_connector = to_intel_connector(connector); |
176 | struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; | 178 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
177 | 179 | ||
178 | if (mode->hdisplay > fixed_mode->hdisplay) | 180 | if (mode->hdisplay > fixed_mode->hdisplay) |
179 | return MODE_PANEL; | 181 | return MODE_PANEL; |
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
249 | { | 251 | { |
250 | struct drm_device *dev = encoder->dev; | 252 | struct drm_device *dev = encoder->dev; |
251 | struct drm_i915_private *dev_priv = dev->dev_private; | 253 | struct drm_i915_private *dev_priv = dev->dev_private; |
252 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 254 | struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); |
253 | struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc; | 255 | struct intel_connector *intel_connector = |
256 | &lvds_encoder->attached_connector->base; | ||
257 | struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; | ||
254 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; | 258 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; |
255 | int pipe; | 259 | int pipe; |
256 | 260 | ||
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
260 | return false; | 264 | return false; |
261 | } | 265 | } |
262 | 266 | ||
263 | if (intel_encoder_check_is_cloned(&intel_lvds->base)) | 267 | if (intel_encoder_check_is_cloned(&lvds_encoder->base)) |
264 | return false; | 268 | return false; |
265 | 269 | ||
266 | /* | 270 | /* |
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
269 | * with the panel scaling set up to source from the H/VDisplay | 273 | * with the panel scaling set up to source from the H/VDisplay |
270 | * of the original mode. | 274 | * of the original mode. |
271 | */ | 275 | */ |
272 | intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); | 276 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
277 | adjusted_mode); | ||
273 | 278 | ||
274 | if (HAS_PCH_SPLIT(dev)) { | 279 | if (HAS_PCH_SPLIT(dev)) { |
275 | intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, | 280 | intel_pch_panel_fitting(dev, |
281 | intel_connector->panel.fitting_mode, | ||
276 | mode, adjusted_mode); | 282 | mode, adjusted_mode); |
277 | return true; | 283 | return true; |
278 | } | 284 | } |
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
298 | 304 | ||
299 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 305 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
300 | 306 | ||
301 | switch (intel_lvds->fitting_mode) { | 307 | switch (intel_connector->panel.fitting_mode) { |
302 | case DRM_MODE_SCALE_CENTER: | 308 | case DRM_MODE_SCALE_CENTER: |
303 | /* | 309 | /* |
304 | * For centered modes, we have to calculate border widths & | 310 | * For centered modes, we have to calculate border widths & |
@@ -396,11 +402,11 @@ out: | |||
396 | if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) | 402 | if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) |
397 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | 403 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; |
398 | 404 | ||
399 | if (pfit_control != intel_lvds->pfit_control || | 405 | if (pfit_control != lvds_encoder->pfit_control || |
400 | pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { | 406 | pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { |
401 | intel_lvds->pfit_control = pfit_control; | 407 | lvds_encoder->pfit_control = pfit_control; |
402 | intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; | 408 | lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; |
403 | intel_lvds->pfit_dirty = true; | 409 | lvds_encoder->pfit_dirty = true; |
404 | } | 410 | } |
405 | dev_priv->lvds_border_bits = border; | 411 | dev_priv->lvds_border_bits = border; |
406 | 412 | ||
@@ -449,14 +455,20 @@ intel_lvds_detect(struct drm_connector *connector, bool force) | |||
449 | */ | 455 | */ |
450 | static int intel_lvds_get_modes(struct drm_connector *connector) | 456 | static int intel_lvds_get_modes(struct drm_connector *connector) |
451 | { | 457 | { |
452 | struct intel_lvds *intel_lvds = intel_attached_lvds(connector); | 458 | struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); |
453 | struct drm_device *dev = connector->dev; | 459 | struct drm_device *dev = connector->dev; |
454 | struct drm_display_mode *mode; | 460 | struct drm_display_mode *mode; |
455 | 461 | ||
456 | if (intel_lvds->edid) | 462 | /* use cached edid if we have one */ |
457 | return drm_add_edid_modes(connector, intel_lvds->edid); | 463 | if (lvds_connector->base.edid) { |
464 | /* invalid edid */ | ||
465 | if (IS_ERR(lvds_connector->base.edid)) | ||
466 | return 0; | ||
467 | |||
468 | return drm_add_edid_modes(connector, lvds_connector->base.edid); | ||
469 | } | ||
458 | 470 | ||
459 | mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); | 471 | mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); |
460 | if (mode == NULL) | 472 | if (mode == NULL) |
461 | return 0; | 473 | return 0; |
462 | 474 | ||
@@ -496,10 +508,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = { | |||
496 | static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | 508 | static int intel_lid_notify(struct notifier_block *nb, unsigned long val, |
497 | void *unused) | 509 | void *unused) |
498 | { | 510 | { |
499 | struct drm_i915_private *dev_priv = | 511 | struct intel_lvds_connector *lvds_connector = |
500 | container_of(nb, struct drm_i915_private, lid_notifier); | 512 | container_of(nb, struct intel_lvds_connector, lid_notifier); |
501 | struct drm_device *dev = dev_priv->dev; | 513 | struct drm_connector *connector = &lvds_connector->base.base; |
502 | struct drm_connector *connector = dev_priv->int_lvds_connector; | 514 | struct drm_device *dev = connector->dev; |
515 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
503 | 516 | ||
504 | if (dev->switch_power_state != DRM_SWITCH_POWER_ON) | 517 | if (dev->switch_power_state != DRM_SWITCH_POWER_ON) |
505 | return NOTIFY_OK; | 518 | return NOTIFY_OK; |
@@ -508,9 +521,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
508 | * check and update the status of LVDS connector after receiving | 521 | * check and update the status of LVDS connector after receiving |
509 | * the LID nofication event. | 522 | * the LID nofication event. |
510 | */ | 523 | */ |
511 | if (connector) | 524 | connector->status = connector->funcs->detect(connector, false); |
512 | connector->status = connector->funcs->detect(connector, | ||
513 | false); | ||
514 | 525 | ||
515 | /* Don't force modeset on machines where it causes a GPU lockup */ | 526 | /* Don't force modeset on machines where it causes a GPU lockup */ |
516 | if (dmi_check_system(intel_no_modeset_on_lid)) | 527 | if (dmi_check_system(intel_no_modeset_on_lid)) |
@@ -541,13 +552,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
541 | */ | 552 | */ |
542 | static void intel_lvds_destroy(struct drm_connector *connector) | 553 | static void intel_lvds_destroy(struct drm_connector *connector) |
543 | { | 554 | { |
544 | struct drm_device *dev = connector->dev; | 555 | struct intel_lvds_connector *lvds_connector = |
545 | struct drm_i915_private *dev_priv = dev->dev_private; | 556 | to_lvds_connector(connector); |
546 | 557 | ||
547 | intel_panel_destroy_backlight(dev); | 558 | if (lvds_connector->lid_notifier.notifier_call) |
559 | acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); | ||
560 | |||
561 | if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) | ||
562 | kfree(lvds_connector->base.edid); | ||
563 | |||
564 | intel_panel_destroy_backlight(connector->dev); | ||
565 | intel_panel_fini(&lvds_connector->base.panel); | ||
548 | 566 | ||
549 | if (dev_priv->lid_notifier.notifier_call) | ||
550 | acpi_lid_notifier_unregister(&dev_priv->lid_notifier); | ||
551 | drm_sysfs_connector_remove(connector); | 567 | drm_sysfs_connector_remove(connector); |
552 | drm_connector_cleanup(connector); | 568 | drm_connector_cleanup(connector); |
553 | kfree(connector); | 569 | kfree(connector); |
@@ -557,22 +573,24 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
557 | struct drm_property *property, | 573 | struct drm_property *property, |
558 | uint64_t value) | 574 | uint64_t value) |
559 | { | 575 | { |
560 | struct intel_lvds *intel_lvds = intel_attached_lvds(connector); | 576 | struct intel_connector *intel_connector = to_intel_connector(connector); |
561 | struct drm_device *dev = connector->dev; | 577 | struct drm_device *dev = connector->dev; |
562 | 578 | ||
563 | if (property == dev->mode_config.scaling_mode_property) { | 579 | if (property == dev->mode_config.scaling_mode_property) { |
564 | struct drm_crtc *crtc = intel_lvds->base.base.crtc; | 580 | struct drm_crtc *crtc; |
565 | 581 | ||
566 | if (value == DRM_MODE_SCALE_NONE) { | 582 | if (value == DRM_MODE_SCALE_NONE) { |
567 | DRM_DEBUG_KMS("no scaling not supported\n"); | 583 | DRM_DEBUG_KMS("no scaling not supported\n"); |
568 | return -EINVAL; | 584 | return -EINVAL; |
569 | } | 585 | } |
570 | 586 | ||
571 | if (intel_lvds->fitting_mode == value) { | 587 | if (intel_connector->panel.fitting_mode == value) { |
572 | /* the LVDS scaling property is not changed */ | 588 | /* the LVDS scaling property is not changed */ |
573 | return 0; | 589 | return 0; |
574 | } | 590 | } |
575 | intel_lvds->fitting_mode = value; | 591 | intel_connector->panel.fitting_mode = value; |
592 | |||
593 | crtc = intel_attached_encoder(connector)->base.crtc; | ||
576 | if (crtc && crtc->enabled) { | 594 | if (crtc && crtc->enabled) { |
577 | /* | 595 | /* |
578 | * If the CRTC is enabled, the display will be changed | 596 | * If the CRTC is enabled, the display will be changed |
@@ -912,12 +930,15 @@ static bool intel_lvds_supported(struct drm_device *dev) | |||
912 | bool intel_lvds_init(struct drm_device *dev) | 930 | bool intel_lvds_init(struct drm_device *dev) |
913 | { | 931 | { |
914 | struct drm_i915_private *dev_priv = dev->dev_private; | 932 | struct drm_i915_private *dev_priv = dev->dev_private; |
915 | struct intel_lvds *intel_lvds; | 933 | struct intel_lvds_encoder *lvds_encoder; |
916 | struct intel_encoder *intel_encoder; | 934 | struct intel_encoder *intel_encoder; |
935 | struct intel_lvds_connector *lvds_connector; | ||
917 | struct intel_connector *intel_connector; | 936 | struct intel_connector *intel_connector; |
918 | struct drm_connector *connector; | 937 | struct drm_connector *connector; |
919 | struct drm_encoder *encoder; | 938 | struct drm_encoder *encoder; |
920 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 939 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
940 | struct drm_display_mode *fixed_mode = NULL; | ||
941 | struct edid *edid; | ||
921 | struct drm_crtc *crtc; | 942 | struct drm_crtc *crtc; |
922 | u32 lvds; | 943 | u32 lvds; |
923 | int pipe; | 944 | int pipe; |
@@ -945,23 +966,25 @@ bool intel_lvds_init(struct drm_device *dev) | |||
945 | } | 966 | } |
946 | } | 967 | } |
947 | 968 | ||
948 | intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); | 969 | lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); |
949 | if (!intel_lvds) { | 970 | if (!lvds_encoder) |
950 | return false; | 971 | return false; |
951 | } | ||
952 | 972 | ||
953 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 973 | lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); |
954 | if (!intel_connector) { | 974 | if (!lvds_connector) { |
955 | kfree(intel_lvds); | 975 | kfree(lvds_encoder); |
956 | return false; | 976 | return false; |
957 | } | 977 | } |
958 | 978 | ||
979 | lvds_encoder->attached_connector = lvds_connector; | ||
980 | |||
959 | if (!HAS_PCH_SPLIT(dev)) { | 981 | if (!HAS_PCH_SPLIT(dev)) { |
960 | intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); | 982 | lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL); |
961 | } | 983 | } |
962 | 984 | ||
963 | intel_encoder = &intel_lvds->base; | 985 | intel_encoder = &lvds_encoder->base; |
964 | encoder = &intel_encoder->base; | 986 | encoder = &intel_encoder->base; |
987 | intel_connector = &lvds_connector->base; | ||
965 | connector = &intel_connector->base; | 988 | connector = &intel_connector->base; |
966 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, | 989 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, |
967 | DRM_MODE_CONNECTOR_LVDS); | 990 | DRM_MODE_CONNECTOR_LVDS); |
@@ -993,14 +1016,10 @@ bool intel_lvds_init(struct drm_device *dev) | |||
993 | 1016 | ||
994 | /* create the scaling mode property */ | 1017 | /* create the scaling mode property */ |
995 | drm_mode_create_scaling_mode_property(dev); | 1018 | drm_mode_create_scaling_mode_property(dev); |
996 | /* | ||
997 | * the initial panel fitting mode will be FULL_SCREEN. | ||
998 | */ | ||
999 | |||
1000 | drm_connector_attach_property(&intel_connector->base, | 1019 | drm_connector_attach_property(&intel_connector->base, |
1001 | dev->mode_config.scaling_mode_property, | 1020 | dev->mode_config.scaling_mode_property, |
1002 | DRM_MODE_SCALE_ASPECT); | 1021 | DRM_MODE_SCALE_ASPECT); |
1003 | intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; | 1022 | intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; |
1004 | /* | 1023 | /* |
1005 | * LVDS discovery: | 1024 | * LVDS discovery: |
1006 | * 1) check for EDID on DDC | 1025 | * 1) check for EDID on DDC |
@@ -1015,20 +1034,21 @@ bool intel_lvds_init(struct drm_device *dev) | |||
1015 | * Attempt to get the fixed panel mode from DDC. Assume that the | 1034 | * Attempt to get the fixed panel mode from DDC. Assume that the |
1016 | * preferred mode is the right one. | 1035 | * preferred mode is the right one. |
1017 | */ | 1036 | */ |
1018 | intel_lvds->edid = drm_get_edid(connector, | 1037 | edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); |
1019 | intel_gmbus_get_adapter(dev_priv, | 1038 | if (edid) { |
1020 | pin)); | 1039 | if (drm_add_edid_modes(connector, edid)) { |
1021 | if (intel_lvds->edid) { | ||
1022 | if (drm_add_edid_modes(connector, | ||
1023 | intel_lvds->edid)) { | ||
1024 | drm_mode_connector_update_edid_property(connector, | 1040 | drm_mode_connector_update_edid_property(connector, |
1025 | intel_lvds->edid); | 1041 | edid); |
1026 | } else { | 1042 | } else { |
1027 | kfree(intel_lvds->edid); | 1043 | kfree(edid); |
1028 | intel_lvds->edid = NULL; | 1044 | edid = ERR_PTR(-EINVAL); |
1029 | } | 1045 | } |
1046 | } else { | ||
1047 | edid = ERR_PTR(-ENOENT); | ||
1030 | } | 1048 | } |
1031 | if (!intel_lvds->edid) { | 1049 | lvds_connector->base.edid = edid; |
1050 | |||
1051 | if (IS_ERR_OR_NULL(edid)) { | ||
1032 | /* Didn't get an EDID, so | 1052 | /* Didn't get an EDID, so |
1033 | * Set wide sync ranges so we get all modes | 1053 | * Set wide sync ranges so we get all modes |
1034 | * handed to valid_mode for checking | 1054 | * handed to valid_mode for checking |
@@ -1041,22 +1061,17 @@ bool intel_lvds_init(struct drm_device *dev) | |||
1041 | 1061 | ||
1042 | list_for_each_entry(scan, &connector->probed_modes, head) { | 1062 | list_for_each_entry(scan, &connector->probed_modes, head) { |
1043 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { | 1063 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { |
1044 | intel_lvds->fixed_mode = | 1064 | fixed_mode = drm_mode_duplicate(dev, scan); |
1045 | drm_mode_duplicate(dev, scan); | 1065 | intel_find_lvds_downclock(dev, fixed_mode, connector); |
1046 | intel_find_lvds_downclock(dev, | ||
1047 | intel_lvds->fixed_mode, | ||
1048 | connector); | ||
1049 | goto out; | 1066 | goto out; |
1050 | } | 1067 | } |
1051 | } | 1068 | } |
1052 | 1069 | ||
1053 | /* Failed to get EDID, what about VBT? */ | 1070 | /* Failed to get EDID, what about VBT? */ |
1054 | if (dev_priv->lfp_lvds_vbt_mode) { | 1071 | if (dev_priv->lfp_lvds_vbt_mode) { |
1055 | intel_lvds->fixed_mode = | 1072 | fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); |
1056 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | 1073 | if (fixed_mode) { |
1057 | if (intel_lvds->fixed_mode) { | 1074 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
1058 | intel_lvds->fixed_mode->type |= | ||
1059 | DRM_MODE_TYPE_PREFERRED; | ||
1060 | goto out; | 1075 | goto out; |
1061 | } | 1076 | } |
1062 | } | 1077 | } |
@@ -1076,16 +1091,15 @@ bool intel_lvds_init(struct drm_device *dev) | |||
1076 | crtc = intel_get_crtc_for_pipe(dev, pipe); | 1091 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
1077 | 1092 | ||
1078 | if (crtc && (lvds & LVDS_PORT_EN)) { | 1093 | if (crtc && (lvds & LVDS_PORT_EN)) { |
1079 | intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); | 1094 | fixed_mode = intel_crtc_mode_get(dev, crtc); |
1080 | if (intel_lvds->fixed_mode) { | 1095 | if (fixed_mode) { |
1081 | intel_lvds->fixed_mode->type |= | 1096 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
1082 | DRM_MODE_TYPE_PREFERRED; | ||
1083 | goto out; | 1097 | goto out; |
1084 | } | 1098 | } |
1085 | } | 1099 | } |
1086 | 1100 | ||
1087 | /* If we still don't have a mode after all that, give up. */ | 1101 | /* If we still don't have a mode after all that, give up. */ |
1088 | if (!intel_lvds->fixed_mode) | 1102 | if (!fixed_mode) |
1089 | goto failed; | 1103 | goto failed; |
1090 | 1104 | ||
1091 | out: | 1105 | out: |
@@ -1100,16 +1114,15 @@ out: | |||
1100 | I915_WRITE(PP_CONTROL, | 1114 | I915_WRITE(PP_CONTROL, |
1101 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | 1115 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
1102 | } | 1116 | } |
1103 | dev_priv->lid_notifier.notifier_call = intel_lid_notify; | 1117 | lvds_connector->lid_notifier.notifier_call = intel_lid_notify; |
1104 | if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { | 1118 | if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { |
1105 | DRM_DEBUG_KMS("lid notifier registration failed\n"); | 1119 | DRM_DEBUG_KMS("lid notifier registration failed\n"); |
1106 | dev_priv->lid_notifier.notifier_call = NULL; | 1120 | lvds_connector->lid_notifier.notifier_call = NULL; |
1107 | } | 1121 | } |
1108 | /* keep the LVDS connector */ | ||
1109 | dev_priv->int_lvds_connector = connector; | ||
1110 | drm_sysfs_connector_add(connector); | 1122 | drm_sysfs_connector_add(connector); |
1111 | 1123 | ||
1112 | intel_panel_setup_backlight(dev); | 1124 | intel_panel_init(&intel_connector->panel, fixed_mode); |
1125 | intel_panel_setup_backlight(connector); | ||
1113 | 1126 | ||
1114 | return true; | 1127 | return true; |
1115 | 1128 | ||
@@ -1117,7 +1130,9 @@ failed: | |||
1117 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); | 1130 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); |
1118 | drm_connector_cleanup(connector); | 1131 | drm_connector_cleanup(connector); |
1119 | drm_encoder_cleanup(encoder); | 1132 | drm_encoder_cleanup(encoder); |
1120 | kfree(intel_lvds); | 1133 | if (fixed_mode) |
1121 | kfree(intel_connector); | 1134 | drm_mode_destroy(dev, fixed_mode); |
1135 | kfree(lvds_encoder); | ||
1136 | kfree(lvds_connector); | ||
1122 | return false; | 1137 | return false; |
1123 | } | 1138 | } |
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index cabd84bf66eb..d49985fcb27f 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector, | |||
45 | drm_mode_connector_update_edid_property(connector, edid); | 45 | drm_mode_connector_update_edid_property(connector, edid); |
46 | ret = drm_add_edid_modes(connector, edid); | 46 | ret = drm_add_edid_modes(connector, edid); |
47 | drm_edid_to_eld(connector, edid); | 47 | drm_edid_to_eld(connector, edid); |
48 | kfree(edid); | ||
49 | 48 | ||
50 | return ret; | 49 | return ret; |
51 | } | 50 | } |
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector, | |||
61 | struct i2c_adapter *adapter) | 60 | struct i2c_adapter *adapter) |
62 | { | 61 | { |
63 | struct edid *edid; | 62 | struct edid *edid; |
63 | int ret; | ||
64 | 64 | ||
65 | edid = drm_get_edid(connector, adapter); | 65 | edid = drm_get_edid(connector, adapter); |
66 | if (!edid) | 66 | if (!edid) |
67 | return 0; | 67 | return 0; |
68 | 68 | ||
69 | return intel_connector_update_modes(connector, edid); | 69 | ret = intel_connector_update_modes(connector, edid); |
70 | kfree(edid); | ||
71 | |||
72 | return ret; | ||
70 | } | 73 | } |
71 | 74 | ||
72 | static const struct drm_prop_enum_list force_audio_names[] = { | 75 | static const struct drm_prop_enum_list force_audio_names[] = { |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 5530413213d8..7741c22c934c 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
154 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; | 154 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; |
155 | u32 max; | 155 | u32 max; |
156 | 156 | ||
157 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); | ||
158 | |||
157 | if (!(bclp & ASLE_BCLP_VALID)) | 159 | if (!(bclp & ASLE_BCLP_VALID)) |
158 | return ASLE_BACKLIGHT_FAILED; | 160 | return ASLE_BACKLIGHT_FAILED; |
159 | 161 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index e2aacd329545..41d463573baa 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -138,24 +138,24 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | |||
138 | 138 | ||
139 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | 139 | if (HAS_PCH_SPLIT(dev_priv->dev)) { |
140 | val = I915_READ(BLC_PWM_PCH_CTL2); | 140 | val = I915_READ(BLC_PWM_PCH_CTL2); |
141 | if (dev_priv->saveBLC_PWM_CTL2 == 0) { | 141 | if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) { |
142 | dev_priv->saveBLC_PWM_CTL2 = val; | 142 | dev_priv->regfile.saveBLC_PWM_CTL2 = val; |
143 | } else if (val == 0) { | 143 | } else if (val == 0) { |
144 | I915_WRITE(BLC_PWM_PCH_CTL2, | 144 | I915_WRITE(BLC_PWM_PCH_CTL2, |
145 | dev_priv->saveBLC_PWM_CTL2); | 145 | dev_priv->regfile.saveBLC_PWM_CTL2); |
146 | val = dev_priv->saveBLC_PWM_CTL2; | 146 | val = dev_priv->regfile.saveBLC_PWM_CTL2; |
147 | } | 147 | } |
148 | } else { | 148 | } else { |
149 | val = I915_READ(BLC_PWM_CTL); | 149 | val = I915_READ(BLC_PWM_CTL); |
150 | if (dev_priv->saveBLC_PWM_CTL == 0) { | 150 | if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { |
151 | dev_priv->saveBLC_PWM_CTL = val; | 151 | dev_priv->regfile.saveBLC_PWM_CTL = val; |
152 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); | 152 | dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); |
153 | } else if (val == 0) { | 153 | } else if (val == 0) { |
154 | I915_WRITE(BLC_PWM_CTL, | 154 | I915_WRITE(BLC_PWM_CTL, |
155 | dev_priv->saveBLC_PWM_CTL); | 155 | dev_priv->regfile.saveBLC_PWM_CTL); |
156 | I915_WRITE(BLC_PWM_CTL2, | 156 | I915_WRITE(BLC_PWM_CTL2, |
157 | dev_priv->saveBLC_PWM_CTL2); | 157 | dev_priv->regfile.saveBLC_PWM_CTL2); |
158 | val = dev_priv->saveBLC_PWM_CTL; | 158 | val = dev_priv->regfile.saveBLC_PWM_CTL; |
159 | } | 159 | } |
160 | } | 160 | } |
161 | 161 | ||
@@ -416,21 +416,14 @@ static const struct backlight_ops intel_panel_bl_ops = { | |||
416 | .get_brightness = intel_panel_get_brightness, | 416 | .get_brightness = intel_panel_get_brightness, |
417 | }; | 417 | }; |
418 | 418 | ||
419 | int intel_panel_setup_backlight(struct drm_device *dev) | 419 | int intel_panel_setup_backlight(struct drm_connector *connector) |
420 | { | 420 | { |
421 | struct drm_device *dev = connector->dev; | ||
421 | struct drm_i915_private *dev_priv = dev->dev_private; | 422 | struct drm_i915_private *dev_priv = dev->dev_private; |
422 | struct backlight_properties props; | 423 | struct backlight_properties props; |
423 | struct drm_connector *connector; | ||
424 | 424 | ||
425 | intel_panel_init_backlight(dev); | 425 | intel_panel_init_backlight(dev); |
426 | 426 | ||
427 | if (dev_priv->int_lvds_connector) | ||
428 | connector = dev_priv->int_lvds_connector; | ||
429 | else if (dev_priv->int_edp_connector) | ||
430 | connector = dev_priv->int_edp_connector; | ||
431 | else | ||
432 | return -ENODEV; | ||
433 | |||
434 | memset(&props, 0, sizeof(props)); | 427 | memset(&props, 0, sizeof(props)); |
435 | props.type = BACKLIGHT_RAW; | 428 | props.type = BACKLIGHT_RAW; |
436 | props.max_brightness = _intel_panel_get_max_backlight(dev); | 429 | props.max_brightness = _intel_panel_get_max_backlight(dev); |
@@ -460,9 +453,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev) | |||
460 | backlight_device_unregister(dev_priv->backlight); | 453 | backlight_device_unregister(dev_priv->backlight); |
461 | } | 454 | } |
462 | #else | 455 | #else |
463 | int intel_panel_setup_backlight(struct drm_device *dev) | 456 | int intel_panel_setup_backlight(struct drm_connector *connector) |
464 | { | 457 | { |
465 | intel_panel_init_backlight(dev); | 458 | intel_panel_init_backlight(connector->dev); |
466 | return 0; | 459 | return 0; |
467 | } | 460 | } |
468 | 461 | ||
@@ -471,3 +464,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev) | |||
471 | return; | 464 | return; |
472 | } | 465 | } |
473 | #endif | 466 | #endif |
467 | |||
468 | int intel_panel_init(struct intel_panel *panel, | ||
469 | struct drm_display_mode *fixed_mode) | ||
470 | { | ||
471 | panel->fixed_mode = fixed_mode; | ||
472 | |||
473 | return 0; | ||
474 | } | ||
475 | |||
476 | void intel_panel_fini(struct intel_panel *panel) | ||
477 | { | ||
478 | struct intel_connector *intel_connector = | ||
479 | container_of(panel, struct intel_connector, panel); | ||
480 | |||
481 | if (panel->fixed_mode) | ||
482 | drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); | ||
483 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 72f41aaa71ff..0cbc0e6402b4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -1468,9 +1468,12 @@ static void i9xx_update_wm(struct drm_device *dev) | |||
1468 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); | 1468 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
1469 | crtc = intel_get_crtc_for_plane(dev, 0); | 1469 | crtc = intel_get_crtc_for_plane(dev, 0); |
1470 | if (crtc->enabled && crtc->fb) { | 1470 | if (crtc->enabled && crtc->fb) { |
1471 | int cpp = crtc->fb->bits_per_pixel / 8; | ||
1472 | if (IS_GEN2(dev)) | ||
1473 | cpp = 4; | ||
1474 | |||
1471 | planea_wm = intel_calculate_wm(crtc->mode.clock, | 1475 | planea_wm = intel_calculate_wm(crtc->mode.clock, |
1472 | wm_info, fifo_size, | 1476 | wm_info, fifo_size, cpp, |
1473 | crtc->fb->bits_per_pixel / 8, | ||
1474 | latency_ns); | 1477 | latency_ns); |
1475 | enabled = crtc; | 1478 | enabled = crtc; |
1476 | } else | 1479 | } else |
@@ -1479,9 +1482,12 @@ static void i9xx_update_wm(struct drm_device *dev) | |||
1479 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); | 1482 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); |
1480 | crtc = intel_get_crtc_for_plane(dev, 1); | 1483 | crtc = intel_get_crtc_for_plane(dev, 1); |
1481 | if (crtc->enabled && crtc->fb) { | 1484 | if (crtc->enabled && crtc->fb) { |
1485 | int cpp = crtc->fb->bits_per_pixel / 8; | ||
1486 | if (IS_GEN2(dev)) | ||
1487 | cpp = 4; | ||
1488 | |||
1482 | planeb_wm = intel_calculate_wm(crtc->mode.clock, | 1489 | planeb_wm = intel_calculate_wm(crtc->mode.clock, |
1483 | wm_info, fifo_size, | 1490 | wm_info, fifo_size, cpp, |
1484 | crtc->fb->bits_per_pixel / 8, | ||
1485 | latency_ns); | 1491 | latency_ns); |
1486 | if (enabled == NULL) | 1492 | if (enabled == NULL) |
1487 | enabled = crtc; | 1493 | enabled = crtc; |
@@ -1571,8 +1577,7 @@ static void i830_update_wm(struct drm_device *dev) | |||
1571 | 1577 | ||
1572 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, | 1578 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
1573 | dev_priv->display.get_fifo_size(dev, 0), | 1579 | dev_priv->display.get_fifo_size(dev, 0), |
1574 | crtc->fb->bits_per_pixel / 8, | 1580 | 4, latency_ns); |
1575 | latency_ns); | ||
1576 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | 1581 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
1577 | fwater_lo |= (3<<8) | planea_wm; | 1582 | fwater_lo |= (3<<8) | planea_wm; |
1578 | 1583 | ||
@@ -2323,7 +2328,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
2323 | struct drm_i915_private *dev_priv = dev->dev_private; | 2328 | struct drm_i915_private *dev_priv = dev->dev_private; |
2324 | u32 limits = gen6_rps_limits(dev_priv, &val); | 2329 | u32 limits = gen6_rps_limits(dev_priv, &val); |
2325 | 2330 | ||
2326 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 2331 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
2327 | WARN_ON(val > dev_priv->rps.max_delay); | 2332 | WARN_ON(val > dev_priv->rps.max_delay); |
2328 | WARN_ON(val < dev_priv->rps.min_delay); | 2333 | WARN_ON(val < dev_priv->rps.min_delay); |
2329 | 2334 | ||
@@ -2404,12 +2409,12 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2404 | struct intel_ring_buffer *ring; | 2409 | struct intel_ring_buffer *ring; |
2405 | u32 rp_state_cap; | 2410 | u32 rp_state_cap; |
2406 | u32 gt_perf_status; | 2411 | u32 gt_perf_status; |
2407 | u32 pcu_mbox, rc6_mask = 0; | 2412 | u32 rc6vids, pcu_mbox, rc6_mask = 0; |
2408 | u32 gtfifodbg; | 2413 | u32 gtfifodbg; |
2409 | int rc6_mode; | 2414 | int rc6_mode; |
2410 | int i; | 2415 | int i, ret; |
2411 | 2416 | ||
2412 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 2417 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
2413 | 2418 | ||
2414 | /* Here begins a magic sequence of register writes to enable | 2419 | /* Here begins a magic sequence of register writes to enable |
2415 | * auto-downclocking. | 2420 | * auto-downclocking. |
@@ -2503,30 +2508,16 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2503 | GEN6_RP_UP_BUSY_AVG | | 2508 | GEN6_RP_UP_BUSY_AVG | |
2504 | (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); | 2509 | (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); |
2505 | 2510 | ||
2506 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | 2511 | ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); |
2507 | 500)) | 2512 | if (!ret) { |
2508 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | 2513 | pcu_mbox = 0; |
2509 | 2514 | ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); | |
2510 | I915_WRITE(GEN6_PCODE_DATA, 0); | 2515 | if (ret && pcu_mbox & (1<<31)) { /* OC supported */ |
2511 | I915_WRITE(GEN6_PCODE_MAILBOX, | 2516 | dev_priv->rps.max_delay = pcu_mbox & 0xff; |
2512 | GEN6_PCODE_READY | | 2517 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); |
2513 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | 2518 | } |
2514 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | 2519 | } else { |
2515 | 500)) | 2520 | DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); |
2516 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
2517 | |||
2518 | /* Check for overclock support */ | ||
2519 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
2520 | 500)) | ||
2521 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | ||
2522 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); | ||
2523 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); | ||
2524 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
2525 | 500)) | ||
2526 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | ||
2527 | if (pcu_mbox & (1<<31)) { /* OC supported */ | ||
2528 | dev_priv->rps.max_delay = pcu_mbox & 0xff; | ||
2529 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); | ||
2530 | } | 2521 | } |
2531 | 2522 | ||
2532 | gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); | 2523 | gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); |
@@ -2540,6 +2531,20 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2540 | /* enable all PM interrupts */ | 2531 | /* enable all PM interrupts */ |
2541 | I915_WRITE(GEN6_PMINTRMSK, 0); | 2532 | I915_WRITE(GEN6_PMINTRMSK, 0); |
2542 | 2533 | ||
2534 | rc6vids = 0; | ||
2535 | ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); | ||
2536 | if (IS_GEN6(dev) && ret) { | ||
2537 | DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); | ||
2538 | } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { | ||
2539 | DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", | ||
2540 | GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); | ||
2541 | rc6vids &= 0xffff00; | ||
2542 | rc6vids |= GEN6_ENCODE_RC6_VID(450); | ||
2543 | ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); | ||
2544 | if (ret) | ||
2545 | DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); | ||
2546 | } | ||
2547 | |||
2543 | gen6_gt_force_wake_put(dev_priv); | 2548 | gen6_gt_force_wake_put(dev_priv); |
2544 | } | 2549 | } |
2545 | 2550 | ||
@@ -2550,7 +2555,7 @@ static void gen6_update_ring_freq(struct drm_device *dev) | |||
2550 | int gpu_freq, ia_freq, max_ia_freq; | 2555 | int gpu_freq, ia_freq, max_ia_freq; |
2551 | int scaling_factor = 180; | 2556 | int scaling_factor = 180; |
2552 | 2557 | ||
2553 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 2558 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
2554 | 2559 | ||
2555 | max_ia_freq = cpufreq_quick_get_max(0); | 2560 | max_ia_freq = cpufreq_quick_get_max(0); |
2556 | /* | 2561 | /* |
@@ -2581,17 +2586,11 @@ static void gen6_update_ring_freq(struct drm_device *dev) | |||
2581 | else | 2586 | else |
2582 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); | 2587 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); |
2583 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); | 2588 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); |
2589 | ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT; | ||
2584 | 2590 | ||
2585 | I915_WRITE(GEN6_PCODE_DATA, | 2591 | sandybridge_pcode_write(dev_priv, |
2586 | (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | | 2592 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE, |
2587 | gpu_freq); | 2593 | ia_freq | gpu_freq); |
2588 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | | ||
2589 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | ||
2590 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & | ||
2591 | GEN6_PCODE_READY) == 0, 10)) { | ||
2592 | DRM_ERROR("pcode write of freq table timed out\n"); | ||
2593 | continue; | ||
2594 | } | ||
2595 | } | 2594 | } |
2596 | } | 2595 | } |
2597 | 2596 | ||
@@ -2599,16 +2598,16 @@ void ironlake_teardown_rc6(struct drm_device *dev) | |||
2599 | { | 2598 | { |
2600 | struct drm_i915_private *dev_priv = dev->dev_private; | 2599 | struct drm_i915_private *dev_priv = dev->dev_private; |
2601 | 2600 | ||
2602 | if (dev_priv->renderctx) { | 2601 | if (dev_priv->ips.renderctx) { |
2603 | i915_gem_object_unpin(dev_priv->renderctx); | 2602 | i915_gem_object_unpin(dev_priv->ips.renderctx); |
2604 | drm_gem_object_unreference(&dev_priv->renderctx->base); | 2603 | drm_gem_object_unreference(&dev_priv->ips.renderctx->base); |
2605 | dev_priv->renderctx = NULL; | 2604 | dev_priv->ips.renderctx = NULL; |
2606 | } | 2605 | } |
2607 | 2606 | ||
2608 | if (dev_priv->pwrctx) { | 2607 | if (dev_priv->ips.pwrctx) { |
2609 | i915_gem_object_unpin(dev_priv->pwrctx); | 2608 | i915_gem_object_unpin(dev_priv->ips.pwrctx); |
2610 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | 2609 | drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); |
2611 | dev_priv->pwrctx = NULL; | 2610 | dev_priv->ips.pwrctx = NULL; |
2612 | } | 2611 | } |
2613 | } | 2612 | } |
2614 | 2613 | ||
@@ -2634,14 +2633,14 @@ static int ironlake_setup_rc6(struct drm_device *dev) | |||
2634 | { | 2633 | { |
2635 | struct drm_i915_private *dev_priv = dev->dev_private; | 2634 | struct drm_i915_private *dev_priv = dev->dev_private; |
2636 | 2635 | ||
2637 | if (dev_priv->renderctx == NULL) | 2636 | if (dev_priv->ips.renderctx == NULL) |
2638 | dev_priv->renderctx = intel_alloc_context_page(dev); | 2637 | dev_priv->ips.renderctx = intel_alloc_context_page(dev); |
2639 | if (!dev_priv->renderctx) | 2638 | if (!dev_priv->ips.renderctx) |
2640 | return -ENOMEM; | 2639 | return -ENOMEM; |
2641 | 2640 | ||
2642 | if (dev_priv->pwrctx == NULL) | 2641 | if (dev_priv->ips.pwrctx == NULL) |
2643 | dev_priv->pwrctx = intel_alloc_context_page(dev); | 2642 | dev_priv->ips.pwrctx = intel_alloc_context_page(dev); |
2644 | if (!dev_priv->pwrctx) { | 2643 | if (!dev_priv->ips.pwrctx) { |
2645 | ironlake_teardown_rc6(dev); | 2644 | ironlake_teardown_rc6(dev); |
2646 | return -ENOMEM; | 2645 | return -ENOMEM; |
2647 | } | 2646 | } |
@@ -2679,7 +2678,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) | |||
2679 | 2678 | ||
2680 | intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); | 2679 | intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
2681 | intel_ring_emit(ring, MI_SET_CONTEXT); | 2680 | intel_ring_emit(ring, MI_SET_CONTEXT); |
2682 | intel_ring_emit(ring, dev_priv->renderctx->gtt_offset | | 2681 | intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | |
2683 | MI_MM_SPACE_GTT | | 2682 | MI_MM_SPACE_GTT | |
2684 | MI_SAVE_EXT_STATE_EN | | 2683 | MI_SAVE_EXT_STATE_EN | |
2685 | MI_RESTORE_EXT_STATE_EN | | 2684 | MI_RESTORE_EXT_STATE_EN | |
@@ -2701,7 +2700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) | |||
2701 | return; | 2700 | return; |
2702 | } | 2701 | } |
2703 | 2702 | ||
2704 | I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); | 2703 | I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); |
2705 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | 2704 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
2706 | } | 2705 | } |
2707 | 2706 | ||
@@ -3310,37 +3309,72 @@ static void intel_init_emon(struct drm_device *dev) | |||
3310 | 3309 | ||
3311 | void intel_disable_gt_powersave(struct drm_device *dev) | 3310 | void intel_disable_gt_powersave(struct drm_device *dev) |
3312 | { | 3311 | { |
3312 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3313 | |||
3313 | if (IS_IRONLAKE_M(dev)) { | 3314 | if (IS_IRONLAKE_M(dev)) { |
3314 | ironlake_disable_drps(dev); | 3315 | ironlake_disable_drps(dev); |
3315 | ironlake_disable_rc6(dev); | 3316 | ironlake_disable_rc6(dev); |
3316 | } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { | 3317 | } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { |
3318 | cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); | ||
3319 | mutex_lock(&dev_priv->rps.hw_lock); | ||
3317 | gen6_disable_rps(dev); | 3320 | gen6_disable_rps(dev); |
3321 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
3318 | } | 3322 | } |
3319 | } | 3323 | } |
3320 | 3324 | ||
3325 | static void intel_gen6_powersave_work(struct work_struct *work) | ||
3326 | { | ||
3327 | struct drm_i915_private *dev_priv = | ||
3328 | container_of(work, struct drm_i915_private, | ||
3329 | rps.delayed_resume_work.work); | ||
3330 | struct drm_device *dev = dev_priv->dev; | ||
3331 | |||
3332 | mutex_lock(&dev_priv->rps.hw_lock); | ||
3333 | gen6_enable_rps(dev); | ||
3334 | gen6_update_ring_freq(dev); | ||
3335 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
3336 | } | ||
3337 | |||
3321 | void intel_enable_gt_powersave(struct drm_device *dev) | 3338 | void intel_enable_gt_powersave(struct drm_device *dev) |
3322 | { | 3339 | { |
3340 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3341 | |||
3323 | if (IS_IRONLAKE_M(dev)) { | 3342 | if (IS_IRONLAKE_M(dev)) { |
3324 | ironlake_enable_drps(dev); | 3343 | ironlake_enable_drps(dev); |
3325 | ironlake_enable_rc6(dev); | 3344 | ironlake_enable_rc6(dev); |
3326 | intel_init_emon(dev); | 3345 | intel_init_emon(dev); |
3327 | } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { | 3346 | } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { |
3328 | gen6_enable_rps(dev); | 3347 | /* |
3329 | gen6_update_ring_freq(dev); | 3348 | * PCU communication is slow and this doesn't need to be |
3349 | * done at any specific time, so do this out of our fast path | ||
3350 | * to make resume and init faster. | ||
3351 | */ | ||
3352 | schedule_delayed_work(&dev_priv->rps.delayed_resume_work, | ||
3353 | round_jiffies_up_relative(HZ)); | ||
3330 | } | 3354 | } |
3331 | } | 3355 | } |
3332 | 3356 | ||
3357 | static void ibx_init_clock_gating(struct drm_device *dev) | ||
3358 | { | ||
3359 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3360 | |||
3361 | /* | ||
3362 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
3363 | * gating for the panel power sequencer or it will fail to | ||
3364 | * start up when no ports are active. | ||
3365 | */ | ||
3366 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
3367 | } | ||
3368 | |||
3333 | static void ironlake_init_clock_gating(struct drm_device *dev) | 3369 | static void ironlake_init_clock_gating(struct drm_device *dev) |
3334 | { | 3370 | { |
3335 | struct drm_i915_private *dev_priv = dev->dev_private; | 3371 | struct drm_i915_private *dev_priv = dev->dev_private; |
3336 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | 3372 | uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
3337 | 3373 | ||
3338 | /* Required for FBC */ | 3374 | /* Required for FBC */ |
3339 | dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | | 3375 | dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | |
3340 | DPFCRUNIT_CLOCK_GATE_DISABLE | | 3376 | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | |
3341 | DPFDUNIT_CLOCK_GATE_DISABLE; | 3377 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; |
3342 | /* Required for CxSR */ | ||
3343 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | ||
3344 | 3378 | ||
3345 | I915_WRITE(PCH_3DCGDIS0, | 3379 | I915_WRITE(PCH_3DCGDIS0, |
3346 | MARIUNIT_CLOCK_GATE_DISABLE | | 3380 | MARIUNIT_CLOCK_GATE_DISABLE | |
@@ -3348,8 +3382,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
3348 | I915_WRITE(PCH_3DCGDIS1, | 3382 | I915_WRITE(PCH_3DCGDIS1, |
3349 | VFMUNIT_CLOCK_GATE_DISABLE); | 3383 | VFMUNIT_CLOCK_GATE_DISABLE); |
3350 | 3384 | ||
3351 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
3352 | |||
3353 | /* | 3385 | /* |
3354 | * According to the spec the following bits should be set in | 3386 | * According to the spec the following bits should be set in |
3355 | * order to enable memory self-refresh | 3387 | * order to enable memory self-refresh |
@@ -3360,9 +3392,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
3360 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3392 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3361 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | 3393 | (I915_READ(ILK_DISPLAY_CHICKEN2) | |
3362 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | 3394 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); |
3363 | I915_WRITE(ILK_DSPCLK_GATE, | 3395 | dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; |
3364 | (I915_READ(ILK_DSPCLK_GATE) | | ||
3365 | ILK_DPARB_CLK_GATE)); | ||
3366 | I915_WRITE(DISP_ARB_CTL, | 3396 | I915_WRITE(DISP_ARB_CTL, |
3367 | (I915_READ(DISP_ARB_CTL) | | 3397 | (I915_READ(DISP_ARB_CTL) | |
3368 | DISP_FBC_WM_DIS)); | 3398 | DISP_FBC_WM_DIS)); |
@@ -3384,28 +3414,51 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
3384 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3414 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3385 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 3415 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
3386 | ILK_DPARB_GATE); | 3416 | ILK_DPARB_GATE); |
3387 | I915_WRITE(ILK_DSPCLK_GATE, | ||
3388 | I915_READ(ILK_DSPCLK_GATE) | | ||
3389 | ILK_DPFC_DIS1 | | ||
3390 | ILK_DPFC_DIS2 | | ||
3391 | ILK_CLK_FBC); | ||
3392 | } | 3417 | } |
3393 | 3418 | ||
3419 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); | ||
3420 | |||
3394 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3421 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3395 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 3422 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
3396 | ILK_ELPIN_409_SELECT); | 3423 | ILK_ELPIN_409_SELECT); |
3397 | I915_WRITE(_3D_CHICKEN2, | 3424 | I915_WRITE(_3D_CHICKEN2, |
3398 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | 3425 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | |
3399 | _3D_CHICKEN2_WM_READ_PIPELINED); | 3426 | _3D_CHICKEN2_WM_READ_PIPELINED); |
3427 | |||
3428 | /* WaDisableRenderCachePipelinedFlush */ | ||
3429 | I915_WRITE(CACHE_MODE_0, | ||
3430 | _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); | ||
3431 | |||
3432 | ibx_init_clock_gating(dev); | ||
3433 | } | ||
3434 | |||
3435 | static void cpt_init_clock_gating(struct drm_device *dev) | ||
3436 | { | ||
3437 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3438 | int pipe; | ||
3439 | |||
3440 | /* | ||
3441 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
3442 | * gating for the panel power sequencer or it will fail to | ||
3443 | * start up when no ports are active. | ||
3444 | */ | ||
3445 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
3446 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | ||
3447 | DPLS_EDP_PPS_FIX_DIS); | ||
3448 | /* WADP0ClockGatingDisable */ | ||
3449 | for_each_pipe(pipe) { | ||
3450 | I915_WRITE(TRANS_CHICKEN1(pipe), | ||
3451 | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); | ||
3452 | } | ||
3400 | } | 3453 | } |
3401 | 3454 | ||
3402 | static void gen6_init_clock_gating(struct drm_device *dev) | 3455 | static void gen6_init_clock_gating(struct drm_device *dev) |
3403 | { | 3456 | { |
3404 | struct drm_i915_private *dev_priv = dev->dev_private; | 3457 | struct drm_i915_private *dev_priv = dev->dev_private; |
3405 | int pipe; | 3458 | int pipe; |
3406 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | 3459 | uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
3407 | 3460 | ||
3408 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | 3461 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); |
3409 | 3462 | ||
3410 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3463 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3411 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 3464 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
@@ -3460,10 +3513,10 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
3460 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | 3513 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
3461 | I915_READ(ILK_DISPLAY_CHICKEN2) | | 3514 | I915_READ(ILK_DISPLAY_CHICKEN2) | |
3462 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); | 3515 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); |
3463 | I915_WRITE(ILK_DSPCLK_GATE, | 3516 | I915_WRITE(ILK_DSPCLK_GATE_D, |
3464 | I915_READ(ILK_DSPCLK_GATE) | | 3517 | I915_READ(ILK_DSPCLK_GATE_D) | |
3465 | ILK_DPARB_CLK_GATE | | 3518 | ILK_DPARBUNIT_CLOCK_GATE_ENABLE | |
3466 | ILK_DPFD_CLK_GATE); | 3519 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE); |
3467 | 3520 | ||
3468 | I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | | 3521 | I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | |
3469 | GEN6_MBCTL_ENABLE_BOOT_FETCH); | 3522 | GEN6_MBCTL_ENABLE_BOOT_FETCH); |
@@ -3479,6 +3532,8 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
3479 | * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ | 3532 | * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ |
3480 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); | 3533 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); |
3481 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); | 3534 | I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); |
3535 | |||
3536 | cpt_init_clock_gating(dev); | ||
3482 | } | 3537 | } |
3483 | 3538 | ||
3484 | static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) | 3539 | static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) |
@@ -3497,9 +3552,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) | |||
3497 | { | 3552 | { |
3498 | struct drm_i915_private *dev_priv = dev->dev_private; | 3553 | struct drm_i915_private *dev_priv = dev->dev_private; |
3499 | int pipe; | 3554 | int pipe; |
3500 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
3501 | |||
3502 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
3503 | 3555 | ||
3504 | I915_WRITE(WM3_LP_ILK, 0); | 3556 | I915_WRITE(WM3_LP_ILK, 0); |
3505 | I915_WRITE(WM2_LP_ILK, 0); | 3557 | I915_WRITE(WM2_LP_ILK, 0); |
@@ -3510,12 +3562,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) | |||
3510 | */ | 3562 | */ |
3511 | I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); | 3563 | I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); |
3512 | 3564 | ||
3513 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | ||
3514 | |||
3515 | I915_WRITE(IVB_CHICKEN3, | ||
3516 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | ||
3517 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | ||
3518 | |||
3519 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ | 3565 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
3520 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, | 3566 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
3521 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | 3567 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
@@ -3559,21 +3605,31 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
3559 | { | 3605 | { |
3560 | struct drm_i915_private *dev_priv = dev->dev_private; | 3606 | struct drm_i915_private *dev_priv = dev->dev_private; |
3561 | int pipe; | 3607 | int pipe; |
3562 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
3563 | uint32_t snpcr; | 3608 | uint32_t snpcr; |
3564 | 3609 | ||
3565 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
3566 | |||
3567 | I915_WRITE(WM3_LP_ILK, 0); | 3610 | I915_WRITE(WM3_LP_ILK, 0); |
3568 | I915_WRITE(WM2_LP_ILK, 0); | 3611 | I915_WRITE(WM2_LP_ILK, 0); |
3569 | I915_WRITE(WM1_LP_ILK, 0); | 3612 | I915_WRITE(WM1_LP_ILK, 0); |
3570 | 3613 | ||
3571 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | 3614 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
3615 | |||
3616 | /* WaDisableEarlyCull */ | ||
3617 | I915_WRITE(_3D_CHICKEN3, | ||
3618 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | ||
3572 | 3619 | ||
3620 | /* WaDisableBackToBackFlipFix */ | ||
3573 | I915_WRITE(IVB_CHICKEN3, | 3621 | I915_WRITE(IVB_CHICKEN3, |
3574 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | 3622 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
3575 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | 3623 | CHICKEN3_DGMG_DONE_FIX_DISABLE); |
3576 | 3624 | ||
3625 | /* WaDisablePSDDualDispatchEnable */ | ||
3626 | if (IS_IVB_GT1(dev)) | ||
3627 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, | ||
3628 | _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | ||
3629 | else | ||
3630 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, | ||
3631 | _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | ||
3632 | |||
3577 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ | 3633 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
3578 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, | 3634 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
3579 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | 3635 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
@@ -3582,7 +3638,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
3582 | I915_WRITE(GEN7_L3CNTLREG1, | 3638 | I915_WRITE(GEN7_L3CNTLREG1, |
3583 | GEN7_WA_FOR_GEN7_L3_CONTROL); | 3639 | GEN7_WA_FOR_GEN7_L3_CONTROL); |
3584 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, | 3640 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, |
3585 | GEN7_WA_L3_CHICKEN_MODE); | 3641 | GEN7_WA_L3_CHICKEN_MODE); |
3642 | if (IS_IVB_GT1(dev)) | ||
3643 | I915_WRITE(GEN7_ROW_CHICKEN2, | ||
3644 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | ||
3645 | else | ||
3646 | I915_WRITE(GEN7_ROW_CHICKEN2_GT2, | ||
3647 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | ||
3648 | |||
3649 | |||
3650 | /* WaForceL3Serialization */ | ||
3651 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & | ||
3652 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | ||
3586 | 3653 | ||
3587 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock | 3654 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock |
3588 | * gating disable must be set. Failure to set it results in | 3655 | * gating disable must be set. Failure to set it results in |
@@ -3626,34 +3693,53 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
3626 | snpcr &= ~GEN6_MBC_SNPCR_MASK; | 3693 | snpcr &= ~GEN6_MBC_SNPCR_MASK; |
3627 | snpcr |= GEN6_MBC_SNPCR_MED; | 3694 | snpcr |= GEN6_MBC_SNPCR_MED; |
3628 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | 3695 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); |
3696 | |||
3697 | cpt_init_clock_gating(dev); | ||
3629 | } | 3698 | } |
3630 | 3699 | ||
3631 | static void valleyview_init_clock_gating(struct drm_device *dev) | 3700 | static void valleyview_init_clock_gating(struct drm_device *dev) |
3632 | { | 3701 | { |
3633 | struct drm_i915_private *dev_priv = dev->dev_private; | 3702 | struct drm_i915_private *dev_priv = dev->dev_private; |
3634 | int pipe; | 3703 | int pipe; |
3635 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | ||
3636 | |||
3637 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | ||
3638 | 3704 | ||
3639 | I915_WRITE(WM3_LP_ILK, 0); | 3705 | I915_WRITE(WM3_LP_ILK, 0); |
3640 | I915_WRITE(WM2_LP_ILK, 0); | 3706 | I915_WRITE(WM2_LP_ILK, 0); |
3641 | I915_WRITE(WM1_LP_ILK, 0); | 3707 | I915_WRITE(WM1_LP_ILK, 0); |
3642 | 3708 | ||
3643 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); | 3709 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); |
3710 | |||
3711 | /* WaDisableEarlyCull */ | ||
3712 | I915_WRITE(_3D_CHICKEN3, | ||
3713 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | ||
3644 | 3714 | ||
3715 | /* WaDisableBackToBackFlipFix */ | ||
3645 | I915_WRITE(IVB_CHICKEN3, | 3716 | I915_WRITE(IVB_CHICKEN3, |
3646 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | 3717 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
3647 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | 3718 | CHICKEN3_DGMG_DONE_FIX_DISABLE); |
3648 | 3719 | ||
3720 | I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, | ||
3721 | _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); | ||
3722 | |||
3649 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ | 3723 | /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ |
3650 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, | 3724 | I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, |
3651 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); | 3725 | GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); |
3652 | 3726 | ||
3653 | /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ | 3727 | /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ |
3654 | I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); | 3728 | I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); |
3655 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); | 3729 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); |
3656 | 3730 | ||
3731 | /* WaForceL3Serialization */ | ||
3732 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & | ||
3733 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | ||
3734 | |||
3735 | /* WaDisableDopClockGating */ | ||
3736 | I915_WRITE(GEN7_ROW_CHICKEN2, | ||
3737 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | ||
3738 | |||
3739 | /* WaForceL3Serialization */ | ||
3740 | I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & | ||
3741 | ~L3SQ_URB_READ_CAM_MATCH_DISABLE); | ||
3742 | |||
3657 | /* This is required by WaCatErrorRejectionIssue */ | 3743 | /* This is required by WaCatErrorRejectionIssue */ |
3658 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | 3744 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, |
3659 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | 3745 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | |
@@ -3710,6 +3796,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev) | |||
3710 | PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | | 3796 | PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | |
3711 | SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | | 3797 | SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | |
3712 | PLANEA_FLIPDONE_INT_EN); | 3798 | PLANEA_FLIPDONE_INT_EN); |
3799 | |||
3800 | /* | ||
3801 | * WaDisableVLVClockGating_VBIIssue | ||
3802 | * Disable clock gating on th GCFG unit to prevent a delay | ||
3803 | * in the reporting of vblank events. | ||
3804 | */ | ||
3805 | I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); | ||
3713 | } | 3806 | } |
3714 | 3807 | ||
3715 | static void g4x_init_clock_gating(struct drm_device *dev) | 3808 | static void g4x_init_clock_gating(struct drm_device *dev) |
@@ -3728,6 +3821,10 @@ static void g4x_init_clock_gating(struct drm_device *dev) | |||
3728 | if (IS_GM45(dev)) | 3821 | if (IS_GM45(dev)) |
3729 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | 3822 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; |
3730 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | 3823 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); |
3824 | |||
3825 | /* WaDisableRenderCachePipelinedFlush */ | ||
3826 | I915_WRITE(CACHE_MODE_0, | ||
3827 | _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); | ||
3731 | } | 3828 | } |
3732 | 3829 | ||
3733 | static void crestline_init_clock_gating(struct drm_device *dev) | 3830 | static void crestline_init_clock_gating(struct drm_device *dev) |
@@ -3783,44 +3880,11 @@ static void i830_init_clock_gating(struct drm_device *dev) | |||
3783 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | 3880 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); |
3784 | } | 3881 | } |
3785 | 3882 | ||
3786 | static void ibx_init_clock_gating(struct drm_device *dev) | ||
3787 | { | ||
3788 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3789 | |||
3790 | /* | ||
3791 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
3792 | * gating for the panel power sequencer or it will fail to | ||
3793 | * start up when no ports are active. | ||
3794 | */ | ||
3795 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
3796 | } | ||
3797 | |||
3798 | static void cpt_init_clock_gating(struct drm_device *dev) | ||
3799 | { | ||
3800 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3801 | int pipe; | ||
3802 | |||
3803 | /* | ||
3804 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
3805 | * gating for the panel power sequencer or it will fail to | ||
3806 | * start up when no ports are active. | ||
3807 | */ | ||
3808 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
3809 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | ||
3810 | DPLS_EDP_PPS_FIX_DIS); | ||
3811 | /* Without this, mode sets may fail silently on FDI */ | ||
3812 | for_each_pipe(pipe) | ||
3813 | I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); | ||
3814 | } | ||
3815 | |||
3816 | void intel_init_clock_gating(struct drm_device *dev) | 3883 | void intel_init_clock_gating(struct drm_device *dev) |
3817 | { | 3884 | { |
3818 | struct drm_i915_private *dev_priv = dev->dev_private; | 3885 | struct drm_i915_private *dev_priv = dev->dev_private; |
3819 | 3886 | ||
3820 | dev_priv->display.init_clock_gating(dev); | 3887 | dev_priv->display.init_clock_gating(dev); |
3821 | |||
3822 | if (dev_priv->display.init_pch_clock_gating) | ||
3823 | dev_priv->display.init_pch_clock_gating(dev); | ||
3824 | } | 3888 | } |
3825 | 3889 | ||
3826 | /* Starting with Haswell, we have different power wells for | 3890 | /* Starting with Haswell, we have different power wells for |
@@ -3846,7 +3910,7 @@ void intel_init_power_wells(struct drm_device *dev) | |||
3846 | 3910 | ||
3847 | if ((well & HSW_PWR_WELL_STATE) == 0) { | 3911 | if ((well & HSW_PWR_WELL_STATE) == 0) { |
3848 | I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); | 3912 | I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); |
3849 | if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20)) | 3913 | if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) |
3850 | DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); | 3914 | DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); |
3851 | } | 3915 | } |
3852 | } | 3916 | } |
@@ -3884,11 +3948,6 @@ void intel_init_pm(struct drm_device *dev) | |||
3884 | 3948 | ||
3885 | /* For FIFO watermark updates */ | 3949 | /* For FIFO watermark updates */ |
3886 | if (HAS_PCH_SPLIT(dev)) { | 3950 | if (HAS_PCH_SPLIT(dev)) { |
3887 | if (HAS_PCH_IBX(dev)) | ||
3888 | dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; | ||
3889 | else if (HAS_PCH_CPT(dev)) | ||
3890 | dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; | ||
3891 | |||
3892 | if (IS_GEN5(dev)) { | 3951 | if (IS_GEN5(dev)) { |
3893 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) | 3952 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
3894 | dev_priv->display.update_wm = ironlake_update_wm; | 3953 | dev_priv->display.update_wm = ironlake_update_wm; |
@@ -3999,6 +4058,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) | |||
3999 | DRM_ERROR("GT thread status wait timed out\n"); | 4058 | DRM_ERROR("GT thread status wait timed out\n"); |
4000 | } | 4059 | } |
4001 | 4060 | ||
4061 | static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) | ||
4062 | { | ||
4063 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | ||
4064 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | ||
4065 | } | ||
4066 | |||
4002 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | 4067 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
4003 | { | 4068 | { |
4004 | u32 forcewake_ack; | 4069 | u32 forcewake_ack; |
@@ -4012,7 +4077,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | |||
4012 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4077 | FORCEWAKE_ACK_TIMEOUT_MS)) |
4013 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | 4078 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
4014 | 4079 | ||
4015 | I915_WRITE_NOTRACE(FORCEWAKE, 1); | 4080 | I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL); |
4016 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | 4081 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ |
4017 | 4082 | ||
4018 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), | 4083 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), |
@@ -4022,6 +4087,12 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | |||
4022 | __gen6_gt_wait_for_thread_c0(dev_priv); | 4087 | __gen6_gt_wait_for_thread_c0(dev_priv); |
4023 | } | 4088 | } |
4024 | 4089 | ||
4090 | static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) | ||
4091 | { | ||
4092 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); | ||
4093 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | ||
4094 | } | ||
4095 | |||
4025 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | 4096 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) |
4026 | { | 4097 | { |
4027 | u32 forcewake_ack; | 4098 | u32 forcewake_ack; |
@@ -4035,7 +4106,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | |||
4035 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4106 | FORCEWAKE_ACK_TIMEOUT_MS)) |
4036 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | 4107 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
4037 | 4108 | ||
4038 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); | 4109 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
4039 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | 4110 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ |
4040 | 4111 | ||
4041 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), | 4112 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), |
@@ -4079,7 +4150,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | |||
4079 | 4150 | ||
4080 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | 4151 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) |
4081 | { | 4152 | { |
4082 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); | 4153 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
4083 | /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ | 4154 | /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ |
4084 | gen6_gt_check_fifodbg(dev_priv); | 4155 | gen6_gt_check_fifodbg(dev_priv); |
4085 | } | 4156 | } |
@@ -4117,13 +4188,18 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | |||
4117 | return ret; | 4188 | return ret; |
4118 | } | 4189 | } |
4119 | 4190 | ||
4191 | static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) | ||
4192 | { | ||
4193 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); | ||
4194 | } | ||
4195 | |||
4120 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | 4196 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) |
4121 | { | 4197 | { |
4122 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, | 4198 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, |
4123 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4199 | FORCEWAKE_ACK_TIMEOUT_MS)) |
4124 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | 4200 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); |
4125 | 4201 | ||
4126 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); | 4202 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); |
4127 | 4203 | ||
4128 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), | 4204 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), |
4129 | FORCEWAKE_ACK_TIMEOUT_MS)) | 4205 | FORCEWAKE_ACK_TIMEOUT_MS)) |
@@ -4134,49 +4210,89 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | |||
4134 | 4210 | ||
4135 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | 4211 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) |
4136 | { | 4212 | { |
4137 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); | 4213 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); |
4138 | /* The below doubles as a POSTING_READ */ | 4214 | /* The below doubles as a POSTING_READ */ |
4139 | gen6_gt_check_fifodbg(dev_priv); | 4215 | gen6_gt_check_fifodbg(dev_priv); |
4140 | } | 4216 | } |
4141 | 4217 | ||
4218 | void intel_gt_reset(struct drm_device *dev) | ||
4219 | { | ||
4220 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4221 | |||
4222 | if (IS_VALLEYVIEW(dev)) { | ||
4223 | vlv_force_wake_reset(dev_priv); | ||
4224 | } else if (INTEL_INFO(dev)->gen >= 6) { | ||
4225 | __gen6_gt_force_wake_reset(dev_priv); | ||
4226 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | ||
4227 | __gen6_gt_force_wake_mt_reset(dev_priv); | ||
4228 | } | ||
4229 | } | ||
4230 | |||
4142 | void intel_gt_init(struct drm_device *dev) | 4231 | void intel_gt_init(struct drm_device *dev) |
4143 | { | 4232 | { |
4144 | struct drm_i915_private *dev_priv = dev->dev_private; | 4233 | struct drm_i915_private *dev_priv = dev->dev_private; |
4145 | 4234 | ||
4146 | spin_lock_init(&dev_priv->gt_lock); | 4235 | spin_lock_init(&dev_priv->gt_lock); |
4147 | 4236 | ||
4237 | intel_gt_reset(dev); | ||
4238 | |||
4148 | if (IS_VALLEYVIEW(dev)) { | 4239 | if (IS_VALLEYVIEW(dev)) { |
4149 | dev_priv->gt.force_wake_get = vlv_force_wake_get; | 4240 | dev_priv->gt.force_wake_get = vlv_force_wake_get; |
4150 | dev_priv->gt.force_wake_put = vlv_force_wake_put; | 4241 | dev_priv->gt.force_wake_put = vlv_force_wake_put; |
4151 | } else if (INTEL_INFO(dev)->gen >= 6) { | 4242 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
4243 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; | ||
4244 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; | ||
4245 | } else if (IS_GEN6(dev)) { | ||
4152 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | 4246 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; |
4153 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | 4247 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; |
4248 | } | ||
4249 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | ||
4250 | intel_gen6_powersave_work); | ||
4251 | } | ||
4154 | 4252 | ||
4155 | /* IVB configs may use multi-threaded forcewake */ | 4253 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) |
4156 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { | 4254 | { |
4157 | u32 ecobus; | 4255 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
4158 | 4256 | ||
4159 | /* A small trick here - if the bios hasn't configured | 4257 | if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { |
4160 | * MT forcewake, and if the device is in RC6, then | 4258 | DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); |
4161 | * force_wake_mt_get will not wake the device and the | 4259 | return -EAGAIN; |
4162 | * ECOBUS read will return zero. Which will be | ||
4163 | * (correctly) interpreted by the test below as MT | ||
4164 | * forcewake being disabled. | ||
4165 | */ | ||
4166 | mutex_lock(&dev->struct_mutex); | ||
4167 | __gen6_gt_force_wake_mt_get(dev_priv); | ||
4168 | ecobus = I915_READ_NOTRACE(ECOBUS); | ||
4169 | __gen6_gt_force_wake_mt_put(dev_priv); | ||
4170 | mutex_unlock(&dev->struct_mutex); | ||
4171 | |||
4172 | if (ecobus & FORCEWAKE_MT_ENABLE) { | ||
4173 | DRM_DEBUG_KMS("Using MT version of forcewake\n"); | ||
4174 | dev_priv->gt.force_wake_get = | ||
4175 | __gen6_gt_force_wake_mt_get; | ||
4176 | dev_priv->gt.force_wake_put = | ||
4177 | __gen6_gt_force_wake_mt_put; | ||
4178 | } | ||
4179 | } | ||
4180 | } | 4260 | } |
4261 | |||
4262 | I915_WRITE(GEN6_PCODE_DATA, *val); | ||
4263 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); | ||
4264 | |||
4265 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
4266 | 500)) { | ||
4267 | DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); | ||
4268 | return -ETIMEDOUT; | ||
4269 | } | ||
4270 | |||
4271 | *val = I915_READ(GEN6_PCODE_DATA); | ||
4272 | I915_WRITE(GEN6_PCODE_DATA, 0); | ||
4273 | |||
4274 | return 0; | ||
4181 | } | 4275 | } |
4182 | 4276 | ||
4277 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) | ||
4278 | { | ||
4279 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | ||
4280 | |||
4281 | if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { | ||
4282 | DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); | ||
4283 | return -EAGAIN; | ||
4284 | } | ||
4285 | |||
4286 | I915_WRITE(GEN6_PCODE_DATA, val); | ||
4287 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); | ||
4288 | |||
4289 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | ||
4290 | 500)) { | ||
4291 | DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); | ||
4292 | return -ETIMEDOUT; | ||
4293 | } | ||
4294 | |||
4295 | I915_WRITE(GEN6_PCODE_DATA, 0); | ||
4296 | |||
4297 | return 0; | ||
4298 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index ecbc5c5dbbbc..1aa76892a830 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, | |||
245 | /* | 245 | /* |
246 | * TLB invalidate requires a post-sync write. | 246 | * TLB invalidate requires a post-sync write. |
247 | */ | 247 | */ |
248 | flags |= PIPE_CONTROL_QW_WRITE; | 248 | flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; |
249 | } | 249 | } |
250 | 250 | ||
251 | ret = intel_ring_begin(ring, 4); | 251 | ret = intel_ring_begin(ring, 4); |
@@ -964,7 +964,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) | |||
964 | } | 964 | } |
965 | 965 | ||
966 | static int | 966 | static int |
967 | i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) | 967 | i965_dispatch_execbuffer(struct intel_ring_buffer *ring, |
968 | u32 offset, u32 length, | ||
969 | unsigned flags) | ||
968 | { | 970 | { |
969 | int ret; | 971 | int ret; |
970 | 972 | ||
@@ -975,7 +977,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) | |||
975 | intel_ring_emit(ring, | 977 | intel_ring_emit(ring, |
976 | MI_BATCH_BUFFER_START | | 978 | MI_BATCH_BUFFER_START | |
977 | MI_BATCH_GTT | | 979 | MI_BATCH_GTT | |
978 | MI_BATCH_NON_SECURE_I965); | 980 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); |
979 | intel_ring_emit(ring, offset); | 981 | intel_ring_emit(ring, offset); |
980 | intel_ring_advance(ring); | 982 | intel_ring_advance(ring); |
981 | 983 | ||
@@ -984,7 +986,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) | |||
984 | 986 | ||
985 | static int | 987 | static int |
986 | i830_dispatch_execbuffer(struct intel_ring_buffer *ring, | 988 | i830_dispatch_execbuffer(struct intel_ring_buffer *ring, |
987 | u32 offset, u32 len) | 989 | u32 offset, u32 len, |
990 | unsigned flags) | ||
988 | { | 991 | { |
989 | int ret; | 992 | int ret; |
990 | 993 | ||
@@ -993,7 +996,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
993 | return ret; | 996 | return ret; |
994 | 997 | ||
995 | intel_ring_emit(ring, MI_BATCH_BUFFER); | 998 | intel_ring_emit(ring, MI_BATCH_BUFFER); |
996 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); | 999 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); |
997 | intel_ring_emit(ring, offset + len - 8); | 1000 | intel_ring_emit(ring, offset + len - 8); |
998 | intel_ring_emit(ring, 0); | 1001 | intel_ring_emit(ring, 0); |
999 | intel_ring_advance(ring); | 1002 | intel_ring_advance(ring); |
@@ -1003,7 +1006,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
1003 | 1006 | ||
1004 | static int | 1007 | static int |
1005 | i915_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1008 | i915_dispatch_execbuffer(struct intel_ring_buffer *ring, |
1006 | u32 offset, u32 len) | 1009 | u32 offset, u32 len, |
1010 | unsigned flags) | ||
1007 | { | 1011 | { |
1008 | int ret; | 1012 | int ret; |
1009 | 1013 | ||
@@ -1012,7 +1016,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
1012 | return ret; | 1016 | return ret; |
1013 | 1017 | ||
1014 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); | 1018 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); |
1015 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); | 1019 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); |
1016 | intel_ring_advance(ring); | 1020 | intel_ring_advance(ring); |
1017 | 1021 | ||
1018 | return 0; | 1022 | return 0; |
@@ -1075,6 +1079,29 @@ err: | |||
1075 | return ret; | 1079 | return ret; |
1076 | } | 1080 | } |
1077 | 1081 | ||
1082 | static int init_phys_hws_pga(struct intel_ring_buffer *ring) | ||
1083 | { | ||
1084 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||
1085 | u32 addr; | ||
1086 | |||
1087 | if (!dev_priv->status_page_dmah) { | ||
1088 | dev_priv->status_page_dmah = | ||
1089 | drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); | ||
1090 | if (!dev_priv->status_page_dmah) | ||
1091 | return -ENOMEM; | ||
1092 | } | ||
1093 | |||
1094 | addr = dev_priv->status_page_dmah->busaddr; | ||
1095 | if (INTEL_INFO(ring->dev)->gen >= 4) | ||
1096 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | ||
1097 | I915_WRITE(HWS_PGA, addr); | ||
1098 | |||
1099 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | ||
1100 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | ||
1101 | |||
1102 | return 0; | ||
1103 | } | ||
1104 | |||
1078 | static int intel_init_ring_buffer(struct drm_device *dev, | 1105 | static int intel_init_ring_buffer(struct drm_device *dev, |
1079 | struct intel_ring_buffer *ring) | 1106 | struct intel_ring_buffer *ring) |
1080 | { | 1107 | { |
@@ -1093,6 +1120,11 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
1093 | ret = init_status_page(ring); | 1120 | ret = init_status_page(ring); |
1094 | if (ret) | 1121 | if (ret) |
1095 | return ret; | 1122 | return ret; |
1123 | } else { | ||
1124 | BUG_ON(ring->id != RCS); | ||
1125 | ret = init_phys_hws_pga(ring); | ||
1126 | if (ret) | ||
1127 | return ret; | ||
1096 | } | 1128 | } |
1097 | 1129 | ||
1098 | obj = i915_gem_alloc_object(dev, ring->size); | 1130 | obj = i915_gem_alloc_object(dev, ring->size); |
@@ -1391,10 +1423,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, | |||
1391 | return ret; | 1423 | return ret; |
1392 | 1424 | ||
1393 | cmd = MI_FLUSH_DW; | 1425 | cmd = MI_FLUSH_DW; |
1426 | /* | ||
1427 | * Bspec vol 1c.5 - video engine command streamer: | ||
1428 | * "If ENABLED, all TLBs will be invalidated once the flush | ||
1429 | * operation is complete. This bit is only valid when the | ||
1430 | * Post-Sync Operation field is a value of 1h or 3h." | ||
1431 | */ | ||
1394 | if (invalidate & I915_GEM_GPU_DOMAINS) | 1432 | if (invalidate & I915_GEM_GPU_DOMAINS) |
1395 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; | 1433 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | |
1434 | MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; | ||
1396 | intel_ring_emit(ring, cmd); | 1435 | intel_ring_emit(ring, cmd); |
1397 | intel_ring_emit(ring, 0); | 1436 | intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); |
1398 | intel_ring_emit(ring, 0); | 1437 | intel_ring_emit(ring, 0); |
1399 | intel_ring_emit(ring, MI_NOOP); | 1438 | intel_ring_emit(ring, MI_NOOP); |
1400 | intel_ring_advance(ring); | 1439 | intel_ring_advance(ring); |
@@ -1402,8 +1441,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, | |||
1402 | } | 1441 | } |
1403 | 1442 | ||
1404 | static int | 1443 | static int |
1444 | hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | ||
1445 | u32 offset, u32 len, | ||
1446 | unsigned flags) | ||
1447 | { | ||
1448 | int ret; | ||
1449 | |||
1450 | ret = intel_ring_begin(ring, 2); | ||
1451 | if (ret) | ||
1452 | return ret; | ||
1453 | |||
1454 | intel_ring_emit(ring, | ||
1455 | MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | | ||
1456 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); | ||
1457 | /* bit0-7 is the length on GEN6+ */ | ||
1458 | intel_ring_emit(ring, offset); | ||
1459 | intel_ring_advance(ring); | ||
1460 | |||
1461 | return 0; | ||
1462 | } | ||
1463 | |||
1464 | static int | ||
1405 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1465 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
1406 | u32 offset, u32 len) | 1466 | u32 offset, u32 len, |
1467 | unsigned flags) | ||
1407 | { | 1468 | { |
1408 | int ret; | 1469 | int ret; |
1409 | 1470 | ||
@@ -1411,7 +1472,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
1411 | if (ret) | 1472 | if (ret) |
1412 | return ret; | 1473 | return ret; |
1413 | 1474 | ||
1414 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); | 1475 | intel_ring_emit(ring, |
1476 | MI_BATCH_BUFFER_START | | ||
1477 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); | ||
1415 | /* bit0-7 is the length on GEN6+ */ | 1478 | /* bit0-7 is the length on GEN6+ */ |
1416 | intel_ring_emit(ring, offset); | 1479 | intel_ring_emit(ring, offset); |
1417 | intel_ring_advance(ring); | 1480 | intel_ring_advance(ring); |
@@ -1432,10 +1495,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, | |||
1432 | return ret; | 1495 | return ret; |
1433 | 1496 | ||
1434 | cmd = MI_FLUSH_DW; | 1497 | cmd = MI_FLUSH_DW; |
1498 | /* | ||
1499 | * Bspec vol 1c.3 - blitter engine command streamer: | ||
1500 | * "If ENABLED, all TLBs will be invalidated once the flush | ||
1501 | * operation is complete. This bit is only valid when the | ||
1502 | * Post-Sync Operation field is a value of 1h or 3h." | ||
1503 | */ | ||
1435 | if (invalidate & I915_GEM_DOMAIN_RENDER) | 1504 | if (invalidate & I915_GEM_DOMAIN_RENDER) |
1436 | cmd |= MI_INVALIDATE_TLB; | 1505 | cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | |
1506 | MI_FLUSH_DW_OP_STOREDW; | ||
1437 | intel_ring_emit(ring, cmd); | 1507 | intel_ring_emit(ring, cmd); |
1438 | intel_ring_emit(ring, 0); | 1508 | intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); |
1439 | intel_ring_emit(ring, 0); | 1509 | intel_ring_emit(ring, 0); |
1440 | intel_ring_emit(ring, MI_NOOP); | 1510 | intel_ring_emit(ring, MI_NOOP); |
1441 | intel_ring_advance(ring); | 1511 | intel_ring_advance(ring); |
@@ -1490,7 +1560,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
1490 | ring->irq_enable_mask = I915_USER_INTERRUPT; | 1560 | ring->irq_enable_mask = I915_USER_INTERRUPT; |
1491 | } | 1561 | } |
1492 | ring->write_tail = ring_write_tail; | 1562 | ring->write_tail = ring_write_tail; |
1493 | if (INTEL_INFO(dev)->gen >= 6) | 1563 | if (IS_HASWELL(dev)) |
1564 | ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; | ||
1565 | else if (INTEL_INFO(dev)->gen >= 6) | ||
1494 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | 1566 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; |
1495 | else if (INTEL_INFO(dev)->gen >= 4) | 1567 | else if (INTEL_INFO(dev)->gen >= 4) |
1496 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | 1568 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; |
@@ -1501,12 +1573,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
1501 | ring->init = init_render_ring; | 1573 | ring->init = init_render_ring; |
1502 | ring->cleanup = render_ring_cleanup; | 1574 | ring->cleanup = render_ring_cleanup; |
1503 | 1575 | ||
1504 | |||
1505 | if (!I915_NEED_GFX_HWS(dev)) { | ||
1506 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | ||
1507 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | ||
1508 | } | ||
1509 | |||
1510 | return intel_init_ring_buffer(dev, ring); | 1576 | return intel_init_ring_buffer(dev, ring); |
1511 | } | 1577 | } |
1512 | 1578 | ||
@@ -1514,6 +1580,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | |||
1514 | { | 1580 | { |
1515 | drm_i915_private_t *dev_priv = dev->dev_private; | 1581 | drm_i915_private_t *dev_priv = dev->dev_private; |
1516 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | 1582 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; |
1583 | int ret; | ||
1517 | 1584 | ||
1518 | ring->name = "render ring"; | 1585 | ring->name = "render ring"; |
1519 | ring->id = RCS; | 1586 | ring->id = RCS; |
@@ -1551,16 +1618,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | |||
1551 | ring->init = init_render_ring; | 1618 | ring->init = init_render_ring; |
1552 | ring->cleanup = render_ring_cleanup; | 1619 | ring->cleanup = render_ring_cleanup; |
1553 | 1620 | ||
1554 | if (!I915_NEED_GFX_HWS(dev)) | ||
1555 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | ||
1556 | |||
1557 | ring->dev = dev; | 1621 | ring->dev = dev; |
1558 | INIT_LIST_HEAD(&ring->active_list); | 1622 | INIT_LIST_HEAD(&ring->active_list); |
1559 | INIT_LIST_HEAD(&ring->request_list); | 1623 | INIT_LIST_HEAD(&ring->request_list); |
1560 | 1624 | ||
1561 | ring->size = size; | 1625 | ring->size = size; |
1562 | ring->effective_size = ring->size; | 1626 | ring->effective_size = ring->size; |
1563 | if (IS_I830(ring->dev)) | 1627 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) |
1564 | ring->effective_size -= 128; | 1628 | ring->effective_size -= 128; |
1565 | 1629 | ||
1566 | ring->virtual_start = ioremap_wc(start, size); | 1630 | ring->virtual_start = ioremap_wc(start, size); |
@@ -1570,6 +1634,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | |||
1570 | return -ENOMEM; | 1634 | return -ENOMEM; |
1571 | } | 1635 | } |
1572 | 1636 | ||
1637 | if (!I915_NEED_GFX_HWS(dev)) { | ||
1638 | ret = init_phys_hws_pga(ring); | ||
1639 | if (ret) | ||
1640 | return ret; | ||
1641 | } | ||
1642 | |||
1573 | return 0; | 1643 | return 0; |
1574 | } | 1644 | } |
1575 | 1645 | ||
@@ -1618,7 +1688,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) | |||
1618 | } | 1688 | } |
1619 | ring->init = init_ring_common; | 1689 | ring->init = init_ring_common; |
1620 | 1690 | ||
1621 | |||
1622 | return intel_init_ring_buffer(dev, ring); | 1691 | return intel_init_ring_buffer(dev, ring); |
1623 | } | 1692 | } |
1624 | 1693 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2ea7a311a1f0..5af65b89765f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -81,7 +81,9 @@ struct intel_ring_buffer { | |||
81 | u32 (*get_seqno)(struct intel_ring_buffer *ring, | 81 | u32 (*get_seqno)(struct intel_ring_buffer *ring, |
82 | bool lazy_coherency); | 82 | bool lazy_coherency); |
83 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, | 83 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
84 | u32 offset, u32 length); | 84 | u32 offset, u32 length, |
85 | unsigned flags); | ||
86 | #define I915_DISPATCH_SECURE 0x1 | ||
85 | void (*cleanup)(struct intel_ring_buffer *ring); | 87 | void (*cleanup)(struct intel_ring_buffer *ring); |
86 | int (*sync_to)(struct intel_ring_buffer *ring, | 88 | int (*sync_to)(struct intel_ring_buffer *ring, |
87 | struct intel_ring_buffer *to, | 89 | struct intel_ring_buffer *to, |
@@ -181,6 +183,8 @@ intel_read_status_page(struct intel_ring_buffer *ring, | |||
181 | * The area from dword 0x20 to 0x3ff is available for driver usage. | 183 | * The area from dword 0x20 to 0x3ff is available for driver usage. |
182 | */ | 184 | */ |
183 | #define I915_GEM_HWS_INDEX 0x20 | 185 | #define I915_GEM_HWS_INDEX 0x20 |
186 | #define I915_GEM_HWS_SCRATCH_INDEX 0x30 | ||
187 | #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) | ||
184 | 188 | ||
185 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); | 189 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); |
186 | 190 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 79d308da29ff..aea64425b1a2 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -2072,17 +2072,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, | |||
2072 | else | 2072 | else |
2073 | mapping = &dev_priv->sdvo_mappings[1]; | 2073 | mapping = &dev_priv->sdvo_mappings[1]; |
2074 | 2074 | ||
2075 | pin = GMBUS_PORT_DPB; | 2075 | if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin)) |
2076 | if (mapping->initialized) | ||
2077 | pin = mapping->i2c_pin; | 2076 | pin = mapping->i2c_pin; |
2077 | else | ||
2078 | pin = GMBUS_PORT_DPB; | ||
2078 | 2079 | ||
2079 | if (intel_gmbus_is_port_valid(pin)) { | 2080 | sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); |
2080 | sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); | 2081 | |
2081 | intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); | 2082 | /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow |
2082 | intel_gmbus_force_bit(sdvo->i2c, true); | 2083 | * our code totally fails once we start using gmbus. Hence fall back to |
2083 | } else { | 2084 | * bit banging for now. */ |
2084 | sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); | 2085 | intel_gmbus_force_bit(sdvo->i2c, true); |
2085 | } | 2086 | } |
2087 | |||
2088 | /* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */ | ||
2089 | static void | ||
2090 | intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo) | ||
2091 | { | ||
2092 | intel_gmbus_force_bit(sdvo->i2c, false); | ||
2086 | } | 2093 | } |
2087 | 2094 | ||
2088 | static bool | 2095 | static bool |
@@ -2658,10 +2665,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | |||
2658 | intel_sdvo->is_sdvob = is_sdvob; | 2665 | intel_sdvo->is_sdvob = is_sdvob; |
2659 | intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; | 2666 | intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; |
2660 | intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); | 2667 | intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); |
2661 | if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { | 2668 | if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) |
2662 | kfree(intel_sdvo); | 2669 | goto err_i2c_bus; |
2663 | return false; | ||
2664 | } | ||
2665 | 2670 | ||
2666 | /* encoder type will be decided later */ | 2671 | /* encoder type will be decided later */ |
2667 | intel_encoder = &intel_sdvo->base; | 2672 | intel_encoder = &intel_sdvo->base; |
@@ -2746,6 +2751,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | |||
2746 | err: | 2751 | err: |
2747 | drm_encoder_cleanup(&intel_encoder->base); | 2752 | drm_encoder_cleanup(&intel_encoder->base); |
2748 | i2c_del_adapter(&intel_sdvo->ddc); | 2753 | i2c_del_adapter(&intel_sdvo->ddc); |
2754 | err_i2c_bus: | ||
2755 | intel_sdvo_unselect_i2c_bus(intel_sdvo); | ||
2749 | kfree(intel_sdvo); | 2756 | kfree(intel_sdvo); |
2750 | 2757 | ||
2751 | return false; | 2758 | return false; |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 82f5e5c7009d..827dcd4edf1c 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
48 | struct intel_plane *intel_plane = to_intel_plane(plane); | 48 | struct intel_plane *intel_plane = to_intel_plane(plane); |
49 | int pipe = intel_plane->pipe; | 49 | int pipe = intel_plane->pipe; |
50 | u32 sprctl, sprscale = 0; | 50 | u32 sprctl, sprscale = 0; |
51 | int pixel_size; | 51 | unsigned long sprsurf_offset, linear_offset; |
52 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | ||
52 | 53 | ||
53 | sprctl = I915_READ(SPRCTL(pipe)); | 54 | sprctl = I915_READ(SPRCTL(pipe)); |
54 | 55 | ||
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
61 | switch (fb->pixel_format) { | 62 | switch (fb->pixel_format) { |
62 | case DRM_FORMAT_XBGR8888: | 63 | case DRM_FORMAT_XBGR8888: |
63 | sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; | 64 | sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; |
64 | pixel_size = 4; | ||
65 | break; | 65 | break; |
66 | case DRM_FORMAT_XRGB8888: | 66 | case DRM_FORMAT_XRGB8888: |
67 | sprctl |= SPRITE_FORMAT_RGBX888; | 67 | sprctl |= SPRITE_FORMAT_RGBX888; |
68 | pixel_size = 4; | ||
69 | break; | 68 | break; |
70 | case DRM_FORMAT_YUYV: | 69 | case DRM_FORMAT_YUYV: |
71 | sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; | 70 | sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; |
72 | pixel_size = 2; | ||
73 | break; | 71 | break; |
74 | case DRM_FORMAT_YVYU: | 72 | case DRM_FORMAT_YVYU: |
75 | sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; | 73 | sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; |
76 | pixel_size = 2; | ||
77 | break; | 74 | break; |
78 | case DRM_FORMAT_UYVY: | 75 | case DRM_FORMAT_UYVY: |
79 | sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; | 76 | sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; |
80 | pixel_size = 2; | ||
81 | break; | 77 | break; |
82 | case DRM_FORMAT_VYUY: | 78 | case DRM_FORMAT_VYUY: |
83 | sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; | 79 | sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; |
84 | pixel_size = 2; | ||
85 | break; | 80 | break; |
86 | default: | 81 | default: |
87 | DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); | 82 | BUG(); |
88 | sprctl |= SPRITE_FORMAT_RGBX888; | ||
89 | pixel_size = 4; | ||
90 | break; | ||
91 | } | 83 | } |
92 | 84 | ||
93 | if (obj->tiling_mode != I915_TILING_NONE) | 85 | if (obj->tiling_mode != I915_TILING_NONE) |
@@ -127,18 +119,28 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
127 | 119 | ||
128 | I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); | 120 | I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); |
129 | I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); | 121 | I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); |
130 | if (obj->tiling_mode != I915_TILING_NONE) { | 122 | |
123 | linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | ||
124 | sprsurf_offset = | ||
125 | intel_gen4_compute_offset_xtiled(&x, &y, | ||
126 | fb->bits_per_pixel / 8, | ||
127 | fb->pitches[0]); | ||
128 | linear_offset -= sprsurf_offset; | ||
129 | |||
130 | /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET | ||
131 | * register */ | ||
132 | if (IS_HASWELL(dev)) | ||
133 | I915_WRITE(SPROFFSET(pipe), (y << 16) | x); | ||
134 | else if (obj->tiling_mode != I915_TILING_NONE) | ||
131 | I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); | 135 | I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); |
132 | } else { | 136 | else |
133 | unsigned long offset; | 137 | I915_WRITE(SPRLINOFF(pipe), linear_offset); |
134 | 138 | ||
135 | offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | ||
136 | I915_WRITE(SPRLINOFF(pipe), offset); | ||
137 | } | ||
138 | I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); | 139 | I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); |
139 | I915_WRITE(SPRSCALE(pipe), sprscale); | 140 | if (intel_plane->can_scale) |
141 | I915_WRITE(SPRSCALE(pipe), sprscale); | ||
140 | I915_WRITE(SPRCTL(pipe), sprctl); | 142 | I915_WRITE(SPRCTL(pipe), sprctl); |
141 | I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); | 143 | I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); |
142 | POSTING_READ(SPRSURF(pipe)); | 144 | POSTING_READ(SPRSURF(pipe)); |
143 | } | 145 | } |
144 | 146 | ||
@@ -152,7 +154,8 @@ ivb_disable_plane(struct drm_plane *plane) | |||
152 | 154 | ||
153 | I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); | 155 | I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); |
154 | /* Can't leave the scaler enabled... */ | 156 | /* Can't leave the scaler enabled... */ |
155 | I915_WRITE(SPRSCALE(pipe), 0); | 157 | if (intel_plane->can_scale) |
158 | I915_WRITE(SPRSCALE(pipe), 0); | ||
156 | /* Activate double buffered register update */ | 159 | /* Activate double buffered register update */ |
157 | I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); | 160 | I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); |
158 | POSTING_READ(SPRSURF(pipe)); | 161 | POSTING_READ(SPRSURF(pipe)); |
@@ -225,8 +228,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
225 | struct drm_device *dev = plane->dev; | 228 | struct drm_device *dev = plane->dev; |
226 | struct drm_i915_private *dev_priv = dev->dev_private; | 229 | struct drm_i915_private *dev_priv = dev->dev_private; |
227 | struct intel_plane *intel_plane = to_intel_plane(plane); | 230 | struct intel_plane *intel_plane = to_intel_plane(plane); |
228 | int pipe = intel_plane->pipe, pixel_size; | 231 | int pipe = intel_plane->pipe; |
232 | unsigned long dvssurf_offset, linear_offset; | ||
229 | u32 dvscntr, dvsscale; | 233 | u32 dvscntr, dvsscale; |
234 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | ||
230 | 235 | ||
231 | dvscntr = I915_READ(DVSCNTR(pipe)); | 236 | dvscntr = I915_READ(DVSCNTR(pipe)); |
232 | 237 | ||
@@ -239,33 +244,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
239 | switch (fb->pixel_format) { | 244 | switch (fb->pixel_format) { |
240 | case DRM_FORMAT_XBGR8888: | 245 | case DRM_FORMAT_XBGR8888: |
241 | dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; | 246 | dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; |
242 | pixel_size = 4; | ||
243 | break; | 247 | break; |
244 | case DRM_FORMAT_XRGB8888: | 248 | case DRM_FORMAT_XRGB8888: |
245 | dvscntr |= DVS_FORMAT_RGBX888; | 249 | dvscntr |= DVS_FORMAT_RGBX888; |
246 | pixel_size = 4; | ||
247 | break; | 250 | break; |
248 | case DRM_FORMAT_YUYV: | 251 | case DRM_FORMAT_YUYV: |
249 | dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; | 252 | dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; |
250 | pixel_size = 2; | ||
251 | break; | 253 | break; |
252 | case DRM_FORMAT_YVYU: | 254 | case DRM_FORMAT_YVYU: |
253 | dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; | 255 | dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; |
254 | pixel_size = 2; | ||
255 | break; | 256 | break; |
256 | case DRM_FORMAT_UYVY: | 257 | case DRM_FORMAT_UYVY: |
257 | dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; | 258 | dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; |
258 | pixel_size = 2; | ||
259 | break; | 259 | break; |
260 | case DRM_FORMAT_VYUY: | 260 | case DRM_FORMAT_VYUY: |
261 | dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; | 261 | dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; |
262 | pixel_size = 2; | ||
263 | break; | 262 | break; |
264 | default: | 263 | default: |
265 | DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); | 264 | BUG(); |
266 | dvscntr |= DVS_FORMAT_RGBX888; | ||
267 | pixel_size = 4; | ||
268 | break; | ||
269 | } | 265 | } |
270 | 266 | ||
271 | if (obj->tiling_mode != I915_TILING_NONE) | 267 | if (obj->tiling_mode != I915_TILING_NONE) |
@@ -289,18 +285,23 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
289 | 285 | ||
290 | I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); | 286 | I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); |
291 | I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); | 287 | I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); |
292 | if (obj->tiling_mode != I915_TILING_NONE) { | 288 | |
289 | linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | ||
290 | dvssurf_offset = | ||
291 | intel_gen4_compute_offset_xtiled(&x, &y, | ||
292 | fb->bits_per_pixel / 8, | ||
293 | fb->pitches[0]); | ||
294 | linear_offset -= dvssurf_offset; | ||
295 | |||
296 | if (obj->tiling_mode != I915_TILING_NONE) | ||
293 | I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); | 297 | I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); |
294 | } else { | 298 | else |
295 | unsigned long offset; | 299 | I915_WRITE(DVSLINOFF(pipe), linear_offset); |
296 | 300 | ||
297 | offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); | ||
298 | I915_WRITE(DVSLINOFF(pipe), offset); | ||
299 | } | ||
300 | I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); | 301 | I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); |
301 | I915_WRITE(DVSSCALE(pipe), dvsscale); | 302 | I915_WRITE(DVSSCALE(pipe), dvsscale); |
302 | I915_WRITE(DVSCNTR(pipe), dvscntr); | 303 | I915_WRITE(DVSCNTR(pipe), dvscntr); |
303 | I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); | 304 | I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); |
304 | POSTING_READ(DVSSURF(pipe)); | 305 | POSTING_READ(DVSSURF(pipe)); |
305 | } | 306 | } |
306 | 307 | ||
@@ -422,6 +423,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
422 | struct intel_framebuffer *intel_fb; | 423 | struct intel_framebuffer *intel_fb; |
423 | struct drm_i915_gem_object *obj, *old_obj; | 424 | struct drm_i915_gem_object *obj, *old_obj; |
424 | int pipe = intel_plane->pipe; | 425 | int pipe = intel_plane->pipe; |
426 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
427 | pipe); | ||
425 | int ret = 0; | 428 | int ret = 0; |
426 | int x = src_x >> 16, y = src_y >> 16; | 429 | int x = src_x >> 16, y = src_y >> 16; |
427 | int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; | 430 | int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; |
@@ -436,7 +439,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
436 | src_h = src_h >> 16; | 439 | src_h = src_h >> 16; |
437 | 440 | ||
438 | /* Pipe must be running... */ | 441 | /* Pipe must be running... */ |
439 | if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) | 442 | if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) |
440 | return -EINVAL; | 443 | return -EINVAL; |
441 | 444 | ||
442 | if (crtc_x >= primary_w || crtc_y >= primary_h) | 445 | if (crtc_x >= primary_w || crtc_y >= primary_h) |
@@ -446,6 +449,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
446 | if (intel_plane->pipe != intel_crtc->pipe) | 449 | if (intel_plane->pipe != intel_crtc->pipe) |
447 | return -EINVAL; | 450 | return -EINVAL; |
448 | 451 | ||
452 | /* Sprite planes can be linear or x-tiled surfaces */ | ||
453 | switch (obj->tiling_mode) { | ||
454 | case I915_TILING_NONE: | ||
455 | case I915_TILING_X: | ||
456 | break; | ||
457 | default: | ||
458 | return -EINVAL; | ||
459 | } | ||
460 | |||
449 | /* | 461 | /* |
450 | * Clamp the width & height into the visible area. Note we don't | 462 | * Clamp the width & height into the visible area. Note we don't |
451 | * try to scale the source if part of the visible region is offscreen. | 463 | * try to scale the source if part of the visible region is offscreen. |
@@ -473,6 +485,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
473 | goto out; | 485 | goto out; |
474 | 486 | ||
475 | /* | 487 | /* |
488 | * We may not have a scaler, eg. HSW does not have it any more | ||
489 | */ | ||
490 | if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h)) | ||
491 | return -EINVAL; | ||
492 | |||
493 | /* | ||
476 | * We can take a larger source and scale it down, but | 494 | * We can take a larger source and scale it down, but |
477 | * only so much... 16x is the max on SNB. | 495 | * only so much... 16x is the max on SNB. |
478 | */ | 496 | */ |
@@ -665,6 +683,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) | |||
665 | switch (INTEL_INFO(dev)->gen) { | 683 | switch (INTEL_INFO(dev)->gen) { |
666 | case 5: | 684 | case 5: |
667 | case 6: | 685 | case 6: |
686 | intel_plane->can_scale = true; | ||
668 | intel_plane->max_downscale = 16; | 687 | intel_plane->max_downscale = 16; |
669 | intel_plane->update_plane = ilk_update_plane; | 688 | intel_plane->update_plane = ilk_update_plane; |
670 | intel_plane->disable_plane = ilk_disable_plane; | 689 | intel_plane->disable_plane = ilk_disable_plane; |
@@ -681,6 +700,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) | |||
681 | break; | 700 | break; |
682 | 701 | ||
683 | case 7: | 702 | case 7: |
703 | if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev)) | ||
704 | intel_plane->can_scale = false; | ||
705 | else | ||
706 | intel_plane->can_scale = true; | ||
684 | intel_plane->max_downscale = 2; | 707 | intel_plane->max_downscale = 2; |
685 | intel_plane->update_plane = ivb_update_plane; | 708 | intel_plane->update_plane = ivb_update_plane; |
686 | intel_plane->disable_plane = ivb_disable_plane; | 709 | intel_plane->disable_plane = ivb_disable_plane; |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 62bb048c135e..86d5c20c325a 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1088 | int dspcntr_reg = DSPCNTR(intel_crtc->plane); | 1088 | int dspcntr_reg = DSPCNTR(intel_crtc->plane); |
1089 | int pipeconf = I915_READ(pipeconf_reg); | 1089 | int pipeconf = I915_READ(pipeconf_reg); |
1090 | int dspcntr = I915_READ(dspcntr_reg); | 1090 | int dspcntr = I915_READ(dspcntr_reg); |
1091 | int dspbase_reg = DSPADDR(intel_crtc->plane); | ||
1092 | int xpos = 0x0, ypos = 0x0; | 1091 | int xpos = 0x0, ypos = 0x0; |
1093 | unsigned int xsize, ysize; | 1092 | unsigned int xsize, ysize; |
1094 | /* Pipe must be off here */ | 1093 | /* Pipe must be off here */ |
1095 | I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); | 1094 | I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); |
1096 | /* Flush the plane changes */ | 1095 | intel_flush_display_plane(dev_priv, intel_crtc->plane); |
1097 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1098 | 1096 | ||
1099 | /* Wait for vblank for the disable to take effect */ | 1097 | /* Wait for vblank for the disable to take effect */ |
1100 | if (IS_GEN2(dev)) | 1098 | if (IS_GEN2(dev)) |
@@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1123 | 1121 | ||
1124 | I915_WRITE(pipeconf_reg, pipeconf); | 1122 | I915_WRITE(pipeconf_reg, pipeconf); |
1125 | I915_WRITE(dspcntr_reg, dspcntr); | 1123 | I915_WRITE(dspcntr_reg, dspcntr); |
1126 | /* Flush the plane changes */ | 1124 | intel_flush_display_plane(dev_priv, intel_crtc->plane); |
1127 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | ||
1128 | } | 1125 | } |
1129 | 1126 | ||
1130 | j = 0; | 1127 | j = 0; |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index d5699fe4f1e8..064023bed480 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -34,8 +34,7 @@ | |||
34 | 34 | ||
35 | /* move these to drm_dp_helper.c/h */ | 35 | /* move these to drm_dp_helper.c/h */ |
36 | #define DP_LINK_CONFIGURATION_SIZE 9 | 36 | #define DP_LINK_CONFIGURATION_SIZE 9 |
37 | #define DP_LINK_STATUS_SIZE 6 | 37 | #define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE |
38 | #define DP_DPCD_SIZE 8 | ||
39 | 38 | ||
40 | static char *voltage_names[] = { | 39 | static char *voltage_names[] = { |
41 | "0.4V", "0.6V", "0.8V", "1.2V" | 40 | "0.4V", "0.6V", "0.8V", "1.2V" |
@@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
290 | 289 | ||
291 | /***** general DP utility functions *****/ | 290 | /***** general DP utility functions *****/ |
292 | 291 | ||
293 | static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) | ||
294 | { | ||
295 | return link_status[r - DP_LANE0_1_STATUS]; | ||
296 | } | ||
297 | |||
298 | static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], | ||
299 | int lane) | ||
300 | { | ||
301 | int i = DP_LANE0_1_STATUS + (lane >> 1); | ||
302 | int s = (lane & 1) * 4; | ||
303 | u8 l = dp_link_status(link_status, i); | ||
304 | return (l >> s) & 0xf; | ||
305 | } | ||
306 | |||
307 | static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], | ||
308 | int lane_count) | ||
309 | { | ||
310 | int lane; | ||
311 | u8 lane_status; | ||
312 | |||
313 | for (lane = 0; lane < lane_count; lane++) { | ||
314 | lane_status = dp_get_lane_status(link_status, lane); | ||
315 | if ((lane_status & DP_LANE_CR_DONE) == 0) | ||
316 | return false; | ||
317 | } | ||
318 | return true; | ||
319 | } | ||
320 | |||
321 | static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], | ||
322 | int lane_count) | ||
323 | { | ||
324 | u8 lane_align; | ||
325 | u8 lane_status; | ||
326 | int lane; | ||
327 | |||
328 | lane_align = dp_link_status(link_status, | ||
329 | DP_LANE_ALIGN_STATUS_UPDATED); | ||
330 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) | ||
331 | return false; | ||
332 | for (lane = 0; lane < lane_count; lane++) { | ||
333 | lane_status = dp_get_lane_status(link_status, lane); | ||
334 | if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) | ||
335 | return false; | ||
336 | } | ||
337 | return true; | ||
338 | } | ||
339 | |||
340 | static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], | ||
341 | int lane) | ||
342 | |||
343 | { | ||
344 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
345 | int s = ((lane & 1) ? | ||
346 | DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : | ||
347 | DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); | ||
348 | u8 l = dp_link_status(link_status, i); | ||
349 | |||
350 | return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; | ||
351 | } | ||
352 | |||
353 | static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], | ||
354 | int lane) | ||
355 | { | ||
356 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
357 | int s = ((lane & 1) ? | ||
358 | DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : | ||
359 | DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); | ||
360 | u8 l = dp_link_status(link_status, i); | ||
361 | |||
362 | return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; | ||
363 | } | ||
364 | |||
365 | #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 | 292 | #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 |
366 | #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 | 293 | #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 |
367 | 294 | ||
@@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], | |||
374 | int lane; | 301 | int lane; |
375 | 302 | ||
376 | for (lane = 0; lane < lane_count; lane++) { | 303 | for (lane = 0; lane < lane_count; lane++) { |
377 | u8 this_v = dp_get_adjust_request_voltage(link_status, lane); | 304 | u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); |
378 | u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); | 305 | u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); |
379 | 306 | ||
380 | DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", | 307 | DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", |
381 | lane, | 308 | lane, |
@@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate, | |||
420 | return (link_rate * lane_num * 8) / bpp; | 347 | return (link_rate * lane_num * 8) / bpp; |
421 | } | 348 | } |
422 | 349 | ||
423 | static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE]) | ||
424 | { | ||
425 | switch (dpcd[DP_MAX_LINK_RATE]) { | ||
426 | case DP_LINK_BW_1_62: | ||
427 | default: | ||
428 | return 162000; | ||
429 | case DP_LINK_BW_2_7: | ||
430 | return 270000; | ||
431 | case DP_LINK_BW_5_4: | ||
432 | return 540000; | ||
433 | } | ||
434 | } | ||
435 | |||
436 | static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE]) | ||
437 | { | ||
438 | return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; | ||
439 | } | ||
440 | |||
441 | static u8 dp_get_dp_link_rate_coded(int link_rate) | ||
442 | { | ||
443 | switch (link_rate) { | ||
444 | case 162000: | ||
445 | default: | ||
446 | return DP_LINK_BW_1_62; | ||
447 | case 270000: | ||
448 | return DP_LINK_BW_2_7; | ||
449 | case 540000: | ||
450 | return DP_LINK_BW_5_4; | ||
451 | } | ||
452 | } | ||
453 | |||
454 | /***** radeon specific DP functions *****/ | 350 | /***** radeon specific DP functions *****/ |
455 | 351 | ||
456 | /* First get the min lane# when low rate is used according to pixel clock | 352 | /* First get the min lane# when low rate is used according to pixel clock |
@@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, | |||
462 | int pix_clock) | 358 | int pix_clock) |
463 | { | 359 | { |
464 | int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); | 360 | int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); |
465 | int max_link_rate = dp_get_max_link_rate(dpcd); | 361 | int max_link_rate = drm_dp_max_link_rate(dpcd); |
466 | int max_lane_num = dp_get_max_lane_number(dpcd); | 362 | int max_lane_num = drm_dp_max_lane_count(dpcd); |
467 | int lane_num; | 363 | int lane_num; |
468 | int max_dp_pix_clock; | 364 | int max_dp_pix_clock; |
469 | 365 | ||
@@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, | |||
500 | return 540000; | 396 | return 540000; |
501 | } | 397 | } |
502 | 398 | ||
503 | return dp_get_max_link_rate(dpcd); | 399 | return drm_dp_max_link_rate(dpcd); |
504 | } | 400 | } |
505 | 401 | ||
506 | static u8 radeon_dp_encoder_service(struct radeon_device *rdev, | 402 | static u8 radeon_dp_encoder_service(struct radeon_device *rdev, |
@@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) | |||
551 | bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) | 447 | bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) |
552 | { | 448 | { |
553 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | 449 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
554 | u8 msg[25]; | 450 | u8 msg[DP_DPCD_SIZE]; |
555 | int ret, i; | 451 | int ret, i; |
556 | 452 | ||
557 | ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0); | 453 | ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, |
454 | DP_DPCD_SIZE, 0); | ||
558 | if (ret > 0) { | 455 | if (ret > 0) { |
559 | memcpy(dig_connector->dpcd, msg, 8); | 456 | memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); |
560 | DRM_DEBUG_KMS("DPCD: "); | 457 | DRM_DEBUG_KMS("DPCD: "); |
561 | for (i = 0; i < 8; i++) | 458 | for (i = 0; i < DP_DPCD_SIZE; i++) |
562 | DRM_DEBUG_KMS("%02x ", msg[i]); | 459 | DRM_DEBUG_KMS("%02x ", msg[i]); |
563 | DRM_DEBUG_KMS("\n"); | 460 | DRM_DEBUG_KMS("\n"); |
564 | 461 | ||
@@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) | |||
664 | 561 | ||
665 | if (!radeon_dp_get_link_status(radeon_connector, link_status)) | 562 | if (!radeon_dp_get_link_status(radeon_connector, link_status)) |
666 | return false; | 563 | return false; |
667 | if (dp_channel_eq_ok(link_status, dig->dp_lane_count)) | 564 | if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) |
668 | return false; | 565 | return false; |
669 | return true; | 566 | return true; |
670 | } | 567 | } |
@@ -677,9 +574,8 @@ struct radeon_dp_link_train_info { | |||
677 | int enc_id; | 574 | int enc_id; |
678 | int dp_clock; | 575 | int dp_clock; |
679 | int dp_lane_count; | 576 | int dp_lane_count; |
680 | int rd_interval; | ||
681 | bool tp3_supported; | 577 | bool tp3_supported; |
682 | u8 dpcd[8]; | 578 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; |
683 | u8 train_set[4]; | 579 | u8 train_set[4]; |
684 | u8 link_status[DP_LINK_STATUS_SIZE]; | 580 | u8 link_status[DP_LINK_STATUS_SIZE]; |
685 | u8 tries; | 581 | u8 tries; |
@@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) | |||
765 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); | 661 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); |
766 | 662 | ||
767 | /* set the link rate on the sink */ | 663 | /* set the link rate on the sink */ |
768 | tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock); | 664 | tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock); |
769 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); | 665 | radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); |
770 | 666 | ||
771 | /* start training on the source */ | 667 | /* start training on the source */ |
@@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) | |||
821 | dp_info->tries = 0; | 717 | dp_info->tries = 0; |
822 | voltage = 0xff; | 718 | voltage = 0xff; |
823 | while (1) { | 719 | while (1) { |
824 | if (dp_info->rd_interval == 0) | 720 | drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); |
825 | udelay(100); | ||
826 | else | ||
827 | mdelay(dp_info->rd_interval * 4); | ||
828 | 721 | ||
829 | if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { | 722 | if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { |
830 | DRM_ERROR("displayport link status failed\n"); | 723 | DRM_ERROR("displayport link status failed\n"); |
831 | break; | 724 | break; |
832 | } | 725 | } |
833 | 726 | ||
834 | if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { | 727 | if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { |
835 | clock_recovery = true; | 728 | clock_recovery = true; |
836 | break; | 729 | break; |
837 | } | 730 | } |
@@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) | |||
886 | dp_info->tries = 0; | 779 | dp_info->tries = 0; |
887 | channel_eq = false; | 780 | channel_eq = false; |
888 | while (1) { | 781 | while (1) { |
889 | if (dp_info->rd_interval == 0) | 782 | drm_dp_link_train_channel_eq_delay(dp_info->dpcd); |
890 | udelay(400); | ||
891 | else | ||
892 | mdelay(dp_info->rd_interval * 4); | ||
893 | 783 | ||
894 | if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { | 784 | if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { |
895 | DRM_ERROR("displayport link status failed\n"); | 785 | DRM_ERROR("displayport link status failed\n"); |
896 | break; | 786 | break; |
897 | } | 787 | } |
898 | 788 | ||
899 | if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { | 789 | if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { |
900 | channel_eq = true; | 790 | channel_eq = true; |
901 | break; | 791 | break; |
902 | } | 792 | } |
@@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder, | |||
974 | else | 864 | else |
975 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; | 865 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; |
976 | 866 | ||
977 | dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL); | ||
978 | tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); | 867 | tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); |
979 | if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) | 868 | if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) |
980 | dp_info.tp3_supported = true; | 869 | dp_info.tp3_supported = true; |
981 | else | 870 | else |
982 | dp_info.tp3_supported = false; | 871 | dp_info.tp3_supported = false; |
983 | 872 | ||
984 | memcpy(dp_info.dpcd, dig_connector->dpcd, 8); | 873 | memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); |
985 | dp_info.rdev = rdev; | 874 | dp_info.rdev = rdev; |
986 | dp_info.encoder = encoder; | 875 | dp_info.encoder = encoder; |
987 | dp_info.connector = connector; | 876 | dp_info.connector = connector; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 92c5f473cf08..d818b503b42f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -427,7 +427,7 @@ struct radeon_connector_atom_dig { | |||
427 | uint32_t igp_lane_info; | 427 | uint32_t igp_lane_info; |
428 | /* displayport */ | 428 | /* displayport */ |
429 | struct radeon_i2c_chan *dp_i2c_bus; | 429 | struct radeon_i2c_chan *dp_i2c_bus; |
430 | u8 dpcd[8]; | 430 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; |
431 | u8 dp_sink_type; | 431 | u8 dp_sink_type; |
432 | int dp_clock; | 432 | int dp_clock; |
433 | int dp_lane_count; | 433 | int dp_lane_count; |