diff options
author | Dave Airlie <airlied@redhat.com> | 2015-12-22 23:22:09 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2015-12-22 23:22:09 -0500 |
commit | ade1ba7346070709856d7e38f8d1a77b7aa710aa (patch) | |
tree | c3adebd818529757e52de73729e4ee444f09f398 /drivers/gpu/drm | |
parent | fd3e14ffbd9ec7593ba4dafc9452a91373a4df05 (diff) | |
parent | 7447a2b221cd4df3960e82478a4ee29312589611 (diff) |
Merge tag 'drm-intel-next-2015-12-18' of git://anongit.freedesktop.org/drm-intel into drm-next
- fix atomic watermark recomputation logic (Maarten)
- modeset sequence fixes for LPT (Ville)
- more kbl enabling & prep work (Rodrigo, Wayne)
- first bits for mst audio
- page dirty tracking fixes from Dave Gordon
- new get_eld hook from Takashi, also included in the sound tree
- fixup cursor handling when placed at address 0 (Ville)
- refactor VBT parsing code (Jani)
- rpm wakelock debug infrastructure (Imre)
- fbdev is pinned again (Chris)
- tune the busywait logic to avoid wasting cpu cycles (Chris)
* tag 'drm-intel-next-2015-12-18' of git://anongit.freedesktop.org/drm-intel: (81 commits)
drm/i915: Update DRIVER_DATE to 20151218
drm/i915/skl: Default to noncoherent access up to F0
drm/i915: Only spin whilst waiting on the current request
drm/i915: Limit the busy wait on requests to 5us not 10ms!
drm/i915: Break busywaiting for requests on pending signals
drm/i915: don't enable autosuspend on platforms without RPM support
drm/i915/backlight: prefer dev_priv over dev pointer
drm/i915: Disable primary plane if we fail to reconstruct BIOS fb (v2)
drm/i915: Pin the ifbdev for the info->system_base GGTT mmapping
drm/i915: Set the map-and-fenceable flag for preallocated objects
drm/i915: mdelay(10) considered harmful
drm/i915: check that we are in an RPM atomic section in GGTT PTE updaters
drm/i915: add support for checking RPM atomic sections
drm/i915: check that we hold an RPM wakelock ref before we put it
drm/i915: add support for checking if we hold an RPM reference
drm/i915: use assert_rpm_wakelock_held instead of opencoding it
drm/i915: add assert_rpm_wakelock_held helper
drm/i915: remove HAS_RUNTIME_PM check from RPM get/put/assert helpers
drm/i915: get a permanent RPM reference on platforms w/o RPM support
drm/i915: refactor RPM disabling due to RC6 being disabled
...
Diffstat (limited to 'drivers/gpu/drm')
52 files changed, 1460 insertions, 803 deletions
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 3b6627dde9ff..c5a942b15d63 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
@@ -666,7 +666,9 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por | |||
666 | } | 666 | } |
667 | 667 | ||
668 | static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, | 668 | static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, |
669 | u8 vcpi, uint16_t pbn) | 669 | u8 vcpi, uint16_t pbn, |
670 | u8 number_sdp_streams, | ||
671 | u8 *sdp_stream_sink) | ||
670 | { | 672 | { |
671 | struct drm_dp_sideband_msg_req_body req; | 673 | struct drm_dp_sideband_msg_req_body req; |
672 | memset(&req, 0, sizeof(req)); | 674 | memset(&req, 0, sizeof(req)); |
@@ -674,6 +676,9 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n | |||
674 | req.u.allocate_payload.port_number = port_num; | 676 | req.u.allocate_payload.port_number = port_num; |
675 | req.u.allocate_payload.vcpi = vcpi; | 677 | req.u.allocate_payload.vcpi = vcpi; |
676 | req.u.allocate_payload.pbn = pbn; | 678 | req.u.allocate_payload.pbn = pbn; |
679 | req.u.allocate_payload.number_sdp_streams = number_sdp_streams; | ||
680 | memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink, | ||
681 | number_sdp_streams); | ||
677 | drm_dp_encode_sideband_req(&req, msg); | 682 | drm_dp_encode_sideband_req(&req, msg); |
678 | msg->path_msg = true; | 683 | msg->path_msg = true; |
679 | return 0; | 684 | return 0; |
@@ -1562,6 +1567,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, | |||
1562 | struct drm_dp_sideband_msg_tx *txmsg; | 1567 | struct drm_dp_sideband_msg_tx *txmsg; |
1563 | struct drm_dp_mst_branch *mstb; | 1568 | struct drm_dp_mst_branch *mstb; |
1564 | int len, ret; | 1569 | int len, ret; |
1570 | u8 sinks[DRM_DP_MAX_SDP_STREAMS]; | ||
1571 | int i; | ||
1565 | 1572 | ||
1566 | mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); | 1573 | mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); |
1567 | if (!mstb) | 1574 | if (!mstb) |
@@ -1573,10 +1580,13 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, | |||
1573 | goto fail_put; | 1580 | goto fail_put; |
1574 | } | 1581 | } |
1575 | 1582 | ||
1583 | for (i = 0; i < port->num_sdp_streams; i++) | ||
1584 | sinks[i] = i; | ||
1585 | |||
1576 | txmsg->dst = mstb; | 1586 | txmsg->dst = mstb; |
1577 | len = build_allocate_payload(txmsg, port->port_num, | 1587 | len = build_allocate_payload(txmsg, port->port_num, |
1578 | id, | 1588 | id, |
1579 | pbn); | 1589 | pbn, port->num_sdp_streams, sinks); |
1580 | 1590 | ||
1581 | drm_dp_queue_down_tx(mgr, txmsg); | 1591 | drm_dp_queue_down_tx(mgr, txmsg); |
1582 | 1592 | ||
@@ -2261,6 +2271,27 @@ out: | |||
2261 | EXPORT_SYMBOL(drm_dp_mst_detect_port); | 2271 | EXPORT_SYMBOL(drm_dp_mst_detect_port); |
2262 | 2272 | ||
2263 | /** | 2273 | /** |
2274 | * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not | ||
2275 | * @mgr: manager for this port | ||
2276 | * @port: unverified pointer to a port. | ||
2277 | * | ||
2278 | * This returns whether the port supports audio or not. | ||
2279 | */ | ||
2280 | bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, | ||
2281 | struct drm_dp_mst_port *port) | ||
2282 | { | ||
2283 | bool ret = false; | ||
2284 | |||
2285 | port = drm_dp_get_validated_port_ref(mgr, port); | ||
2286 | if (!port) | ||
2287 | return ret; | ||
2288 | ret = port->has_audio; | ||
2289 | drm_dp_put_port(port); | ||
2290 | return ret; | ||
2291 | } | ||
2292 | EXPORT_SYMBOL(drm_dp_mst_port_has_audio); | ||
2293 | |||
2294 | /** | ||
2264 | * drm_dp_mst_get_edid() - get EDID for an MST port | 2295 | * drm_dp_mst_get_edid() - get EDID for an MST port |
2265 | * @connector: toplevel connector to get EDID for | 2296 | * @connector: toplevel connector to get EDID for |
2266 | * @mgr: manager for this port | 2297 | * @mgr: manager for this port |
@@ -2285,6 +2316,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ | |||
2285 | edid = drm_get_edid(connector, &port->aux.ddc); | 2316 | edid = drm_get_edid(connector, &port->aux.ddc); |
2286 | drm_mode_connector_set_tile_property(connector); | 2317 | drm_mode_connector_set_tile_property(connector); |
2287 | } | 2318 | } |
2319 | port->has_audio = drm_detect_monitor_audio(edid); | ||
2288 | drm_dp_put_port(port); | 2320 | drm_dp_put_port(port); |
2289 | return edid; | 2321 | return edid; |
2290 | } | 2322 | } |
@@ -2568,7 +2600,7 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m, | |||
2568 | 2600 | ||
2569 | seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); | 2601 | seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); |
2570 | list_for_each_entry(port, &mstb->ports, next) { | 2602 | list_for_each_entry(port, &mstb->ports, next) { |
2571 | seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector); | 2603 | seq_printf(m, "%sport: %d: ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector); |
2572 | if (port->mstb) | 2604 | if (port->mstb) |
2573 | drm_dp_mst_dump_mstb(m, port->mstb); | 2605 | drm_dp_mst_dump_mstb(m, port->mstb); |
2574 | } | 2606 | } |
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 13dea4263554..5e6a3013da49 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h | |||
@@ -129,11 +129,11 @@ struct intel_dvo_dev_ops { | |||
129 | void (*dump_regs)(struct intel_dvo_device *dvo); | 129 | void (*dump_regs)(struct intel_dvo_device *dvo); |
130 | }; | 130 | }; |
131 | 131 | ||
132 | extern struct intel_dvo_dev_ops sil164_ops; | 132 | extern const struct intel_dvo_dev_ops sil164_ops; |
133 | extern struct intel_dvo_dev_ops ch7xxx_ops; | 133 | extern const struct intel_dvo_dev_ops ch7xxx_ops; |
134 | extern struct intel_dvo_dev_ops ivch_ops; | 134 | extern const struct intel_dvo_dev_ops ivch_ops; |
135 | extern struct intel_dvo_dev_ops tfp410_ops; | 135 | extern const struct intel_dvo_dev_ops tfp410_ops; |
136 | extern struct intel_dvo_dev_ops ch7017_ops; | 136 | extern const struct intel_dvo_dev_ops ch7017_ops; |
137 | extern struct intel_dvo_dev_ops ns2501_ops; | 137 | extern const struct intel_dvo_dev_ops ns2501_ops; |
138 | 138 | ||
139 | #endif /* _INTEL_DVO_H */ | 139 | #endif /* _INTEL_DVO_H */ |
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index cbb22027a3ce..b3c7c199200c 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c | |||
@@ -402,7 +402,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo) | |||
402 | } | 402 | } |
403 | } | 403 | } |
404 | 404 | ||
405 | struct intel_dvo_dev_ops ch7017_ops = { | 405 | const struct intel_dvo_dev_ops ch7017_ops = { |
406 | .init = ch7017_init, | 406 | .init = ch7017_init, |
407 | .detect = ch7017_detect, | 407 | .detect = ch7017_detect, |
408 | .mode_valid = ch7017_mode_valid, | 408 | .mode_valid = ch7017_mode_valid, |
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 4b4acc1a06fe..44b3159f2fe8 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c | |||
@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo) | |||
356 | } | 356 | } |
357 | } | 357 | } |
358 | 358 | ||
359 | struct intel_dvo_dev_ops ch7xxx_ops = { | 359 | const struct intel_dvo_dev_ops ch7xxx_ops = { |
360 | .init = ch7xxx_init, | 360 | .init = ch7xxx_init, |
361 | .detect = ch7xxx_detect, | 361 | .detect = ch7xxx_detect, |
362 | .mode_valid = ch7xxx_mode_valid, | 362 | .mode_valid = ch7xxx_mode_valid, |
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index ff9f1b077d83..4950b82f5b49 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c | |||
@@ -490,7 +490,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo) | |||
490 | } | 490 | } |
491 | } | 491 | } |
492 | 492 | ||
493 | struct intel_dvo_dev_ops ivch_ops = { | 493 | const struct intel_dvo_dev_ops ivch_ops = { |
494 | .init = ivch_init, | 494 | .init = ivch_init, |
495 | .dpms = ivch_dpms, | 495 | .dpms = ivch_dpms, |
496 | .get_hw_state = ivch_get_hw_state, | 496 | .get_hw_state = ivch_get_hw_state, |
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c index 063859fff0f0..2379c33cfe51 100644 --- a/drivers/gpu/drm/i915/dvo_ns2501.c +++ b/drivers/gpu/drm/i915/dvo_ns2501.c | |||
@@ -698,7 +698,7 @@ static void ns2501_destroy(struct intel_dvo_device *dvo) | |||
698 | } | 698 | } |
699 | } | 699 | } |
700 | 700 | ||
701 | struct intel_dvo_dev_ops ns2501_ops = { | 701 | const struct intel_dvo_dev_ops ns2501_ops = { |
702 | .init = ns2501_init, | 702 | .init = ns2501_init, |
703 | .detect = ns2501_detect, | 703 | .detect = ns2501_detect, |
704 | .mode_valid = ns2501_mode_valid, | 704 | .mode_valid = ns2501_mode_valid, |
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 26f13eb634f9..1c1a0674dbab 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c | |||
@@ -267,7 +267,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo) | |||
267 | } | 267 | } |
268 | } | 268 | } |
269 | 269 | ||
270 | struct intel_dvo_dev_ops sil164_ops = { | 270 | const struct intel_dvo_dev_ops sil164_ops = { |
271 | .init = sil164_init, | 271 | .init = sil164_init, |
272 | .detect = sil164_detect, | 272 | .detect = sil164_detect, |
273 | .mode_valid = sil164_mode_valid, | 273 | .mode_valid = sil164_mode_valid, |
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 6f1a0a6d4e22..31e181da93db 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c | |||
@@ -306,7 +306,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo) | |||
306 | } | 306 | } |
307 | } | 307 | } |
308 | 308 | ||
309 | struct intel_dvo_dev_ops tfp410_ops = { | 309 | const struct intel_dvo_dev_ops tfp410_ops = { |
310 | .init = tfp410_init, | 310 | .init = tfp410_init, |
311 | .detect = tfp410_detect, | 311 | .detect = tfp410_detect, |
312 | .mode_valid = tfp410_mode_valid, | 312 | .mode_valid = tfp410_mode_valid, |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a8721fccd8a0..0fc38bb7276c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -1142,8 +1142,34 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1142 | MEMSTAT_VID_SHIFT); | 1142 | MEMSTAT_VID_SHIFT); |
1143 | seq_printf(m, "Current P-state: %d\n", | 1143 | seq_printf(m, "Current P-state: %d\n", |
1144 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); | 1144 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); |
1145 | } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || | 1145 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1146 | IS_BROADWELL(dev) || IS_GEN9(dev)) { | 1146 | u32 freq_sts; |
1147 | |||
1148 | mutex_lock(&dev_priv->rps.hw_lock); | ||
1149 | freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | ||
1150 | seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); | ||
1151 | seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); | ||
1152 | |||
1153 | seq_printf(m, "actual GPU freq: %d MHz\n", | ||
1154 | intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); | ||
1155 | |||
1156 | seq_printf(m, "current GPU freq: %d MHz\n", | ||
1157 | intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); | ||
1158 | |||
1159 | seq_printf(m, "max GPU freq: %d MHz\n", | ||
1160 | intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); | ||
1161 | |||
1162 | seq_printf(m, "min GPU freq: %d MHz\n", | ||
1163 | intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); | ||
1164 | |||
1165 | seq_printf(m, "idle GPU freq: %d MHz\n", | ||
1166 | intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); | ||
1167 | |||
1168 | seq_printf(m, | ||
1169 | "efficient (RPe) frequency: %d MHz\n", | ||
1170 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); | ||
1171 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
1172 | } else if (INTEL_INFO(dev)->gen >= 6) { | ||
1147 | u32 rp_state_limits; | 1173 | u32 rp_state_limits; |
1148 | u32 gt_perf_status; | 1174 | u32 gt_perf_status; |
1149 | u32 rp_state_cap; | 1175 | u32 rp_state_cap; |
@@ -1284,33 +1310,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1284 | seq_printf(m, | 1310 | seq_printf(m, |
1285 | "efficient (RPe) frequency: %d MHz\n", | 1311 | "efficient (RPe) frequency: %d MHz\n", |
1286 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); | 1312 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); |
1287 | } else if (IS_VALLEYVIEW(dev)) { | ||
1288 | u32 freq_sts; | ||
1289 | |||
1290 | mutex_lock(&dev_priv->rps.hw_lock); | ||
1291 | freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | ||
1292 | seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); | ||
1293 | seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); | ||
1294 | |||
1295 | seq_printf(m, "actual GPU freq: %d MHz\n", | ||
1296 | intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); | ||
1297 | |||
1298 | seq_printf(m, "current GPU freq: %d MHz\n", | ||
1299 | intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); | ||
1300 | |||
1301 | seq_printf(m, "max GPU freq: %d MHz\n", | ||
1302 | intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); | ||
1303 | |||
1304 | seq_printf(m, "min GPU freq: %d MHz\n", | ||
1305 | intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); | ||
1306 | |||
1307 | seq_printf(m, "idle GPU freq: %d MHz\n", | ||
1308 | intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); | ||
1309 | |||
1310 | seq_printf(m, | ||
1311 | "efficient (RPe) frequency: %d MHz\n", | ||
1312 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); | ||
1313 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
1314 | } else { | 1313 | } else { |
1315 | seq_puts(m, "no P-state info available\n"); | 1314 | seq_puts(m, "no P-state info available\n"); |
1316 | } | 1315 | } |
@@ -1602,7 +1601,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) | |||
1602 | struct drm_info_node *node = m->private; | 1601 | struct drm_info_node *node = m->private; |
1603 | struct drm_device *dev = node->minor->dev; | 1602 | struct drm_device *dev = node->minor->dev; |
1604 | 1603 | ||
1605 | if (IS_VALLEYVIEW(dev)) | 1604 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
1606 | return vlv_drpc_info(m); | 1605 | return vlv_drpc_info(m); |
1607 | else if (INTEL_INFO(dev)->gen >= 6) | 1606 | else if (INTEL_INFO(dev)->gen >= 6) |
1608 | return gen6_drpc_info(m); | 1607 | return gen6_drpc_info(m); |
@@ -1743,7 +1742,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) | |||
1743 | sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; | 1742 | sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; |
1744 | else if (IS_PINEVIEW(dev)) | 1743 | else if (IS_PINEVIEW(dev)) |
1745 | sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; | 1744 | sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; |
1746 | else if (IS_VALLEYVIEW(dev)) | 1745 | else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
1747 | sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; | 1746 | sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; |
1748 | 1747 | ||
1749 | intel_runtime_pm_put(dev_priv); | 1748 | intel_runtime_pm_put(dev_priv); |
@@ -1843,25 +1842,31 @@ static int i915_opregion(struct seq_file *m, void *unused) | |||
1843 | struct drm_device *dev = node->minor->dev; | 1842 | struct drm_device *dev = node->minor->dev; |
1844 | struct drm_i915_private *dev_priv = dev->dev_private; | 1843 | struct drm_i915_private *dev_priv = dev->dev_private; |
1845 | struct intel_opregion *opregion = &dev_priv->opregion; | 1844 | struct intel_opregion *opregion = &dev_priv->opregion; |
1846 | void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); | ||
1847 | int ret; | 1845 | int ret; |
1848 | 1846 | ||
1849 | if (data == NULL) | ||
1850 | return -ENOMEM; | ||
1851 | |||
1852 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1847 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
1853 | if (ret) | 1848 | if (ret) |
1854 | goto out; | 1849 | goto out; |
1855 | 1850 | ||
1856 | if (opregion->header) { | 1851 | if (opregion->header) |
1857 | memcpy(data, opregion->header, OPREGION_SIZE); | 1852 | seq_write(m, opregion->header, OPREGION_SIZE); |
1858 | seq_write(m, data, OPREGION_SIZE); | ||
1859 | } | ||
1860 | 1853 | ||
1861 | mutex_unlock(&dev->struct_mutex); | 1854 | mutex_unlock(&dev->struct_mutex); |
1862 | 1855 | ||
1863 | out: | 1856 | out: |
1864 | kfree(data); | 1857 | return 0; |
1858 | } | ||
1859 | |||
1860 | static int i915_vbt(struct seq_file *m, void *unused) | ||
1861 | { | ||
1862 | struct drm_info_node *node = m->private; | ||
1863 | struct drm_device *dev = node->minor->dev; | ||
1864 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1865 | struct intel_opregion *opregion = &dev_priv->opregion; | ||
1866 | |||
1867 | if (opregion->vbt) | ||
1868 | seq_write(m, opregion->vbt, opregion->vbt_size); | ||
1869 | |||
1865 | return 0; | 1870 | return 0; |
1866 | } | 1871 | } |
1867 | 1872 | ||
@@ -2850,6 +2855,20 @@ static void intel_dp_info(struct seq_file *m, | |||
2850 | intel_panel_info(m, &intel_connector->panel); | 2855 | intel_panel_info(m, &intel_connector->panel); |
2851 | } | 2856 | } |
2852 | 2857 | ||
2858 | static void intel_dp_mst_info(struct seq_file *m, | ||
2859 | struct intel_connector *intel_connector) | ||
2860 | { | ||
2861 | struct intel_encoder *intel_encoder = intel_connector->encoder; | ||
2862 | struct intel_dp_mst_encoder *intel_mst = | ||
2863 | enc_to_mst(&intel_encoder->base); | ||
2864 | struct intel_digital_port *intel_dig_port = intel_mst->primary; | ||
2865 | struct intel_dp *intel_dp = &intel_dig_port->dp; | ||
2866 | bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, | ||
2867 | intel_connector->port); | ||
2868 | |||
2869 | seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); | ||
2870 | } | ||
2871 | |||
2853 | static void intel_hdmi_info(struct seq_file *m, | 2872 | static void intel_hdmi_info(struct seq_file *m, |
2854 | struct intel_connector *intel_connector) | 2873 | struct intel_connector *intel_connector) |
2855 | { | 2874 | { |
@@ -2893,6 +2912,8 @@ static void intel_connector_info(struct seq_file *m, | |||
2893 | intel_hdmi_info(m, intel_connector); | 2912 | intel_hdmi_info(m, intel_connector); |
2894 | else if (intel_encoder->type == INTEL_OUTPUT_LVDS) | 2913 | else if (intel_encoder->type == INTEL_OUTPUT_LVDS) |
2895 | intel_lvds_info(m, intel_connector); | 2914 | intel_lvds_info(m, intel_connector); |
2915 | else if (intel_encoder->type == INTEL_OUTPUT_DP_MST) | ||
2916 | intel_dp_mst_info(m, intel_connector); | ||
2896 | } | 2917 | } |
2897 | 2918 | ||
2898 | seq_printf(m, "\tmodes:\n"); | 2919 | seq_printf(m, "\tmodes:\n"); |
@@ -3983,7 +4004,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, | |||
3983 | ret = i8xx_pipe_crc_ctl_reg(&source, &val); | 4004 | ret = i8xx_pipe_crc_ctl_reg(&source, &val); |
3984 | else if (INTEL_INFO(dev)->gen < 5) | 4005 | else if (INTEL_INFO(dev)->gen < 5) |
3985 | ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); | 4006 | ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); |
3986 | else if (IS_VALLEYVIEW(dev)) | 4007 | else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
3987 | ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); | 4008 | ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); |
3988 | else if (IS_GEN5(dev) || IS_GEN6(dev)) | 4009 | else if (IS_GEN5(dev) || IS_GEN6(dev)) |
3989 | ret = ilk_pipe_crc_ctl_reg(&source, &val); | 4010 | ret = ilk_pipe_crc_ctl_reg(&source, &val); |
@@ -4052,7 +4073,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, | |||
4052 | 4073 | ||
4053 | if (IS_G4X(dev)) | 4074 | if (IS_G4X(dev)) |
4054 | g4x_undo_pipe_scramble_reset(dev, pipe); | 4075 | g4x_undo_pipe_scramble_reset(dev, pipe); |
4055 | else if (IS_VALLEYVIEW(dev)) | 4076 | else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
4056 | vlv_undo_pipe_scramble_reset(dev, pipe); | 4077 | vlv_undo_pipe_scramble_reset(dev, pipe); |
4057 | else if (IS_HASWELL(dev) && pipe == PIPE_A) | 4078 | else if (IS_HASWELL(dev) && pipe == PIPE_A) |
4058 | hsw_trans_edp_pipe_A_crc_wa(dev, false); | 4079 | hsw_trans_edp_pipe_A_crc_wa(dev, false); |
@@ -4442,7 +4463,8 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) | |||
4442 | * - WM1+ latency values in 0.5us units | 4463 | * - WM1+ latency values in 0.5us units |
4443 | * - latencies are in us on gen9/vlv/chv | 4464 | * - latencies are in us on gen9/vlv/chv |
4444 | */ | 4465 | */ |
4445 | if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev)) | 4466 | if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) || |
4467 | IS_CHERRYVIEW(dev)) | ||
4446 | latency *= 10; | 4468 | latency *= 10; |
4447 | else if (level > 0) | 4469 | else if (level > 0) |
4448 | latency *= 5; | 4470 | latency *= 5; |
@@ -5316,6 +5338,7 @@ static const struct drm_info_list i915_debugfs_list[] = { | |||
5316 | {"i915_ips_status", i915_ips_status, 0}, | 5338 | {"i915_ips_status", i915_ips_status, 0}, |
5317 | {"i915_sr_status", i915_sr_status, 0}, | 5339 | {"i915_sr_status", i915_sr_status, 0}, |
5318 | {"i915_opregion", i915_opregion, 0}, | 5340 | {"i915_opregion", i915_opregion, 0}, |
5341 | {"i915_vbt", i915_vbt, 0}, | ||
5319 | {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, | 5342 | {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, |
5320 | {"i915_context_status", i915_context_status, 0}, | 5343 | {"i915_context_status", i915_context_status, 0}, |
5321 | {"i915_dump_lrc", i915_dump_lrc, 0}, | 5344 | {"i915_dump_lrc", i915_dump_lrc, 0}, |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index a81c76603544..988a3806512a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -169,6 +169,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
169 | case I915_PARAM_HAS_RESOURCE_STREAMER: | 169 | case I915_PARAM_HAS_RESOURCE_STREAMER: |
170 | value = HAS_RESOURCE_STREAMER(dev); | 170 | value = HAS_RESOURCE_STREAMER(dev); |
171 | break; | 171 | break; |
172 | case I915_PARAM_HAS_EXEC_SOFTPIN: | ||
173 | value = 1; | ||
174 | break; | ||
172 | default: | 175 | default: |
173 | DRM_DEBUG("Unknown parameter %d\n", param->param); | 176 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
174 | return -EINVAL; | 177 | return -EINVAL; |
@@ -256,7 +259,7 @@ intel_setup_mchbar(struct drm_device *dev) | |||
256 | u32 temp; | 259 | u32 temp; |
257 | bool enabled; | 260 | bool enabled; |
258 | 261 | ||
259 | if (IS_VALLEYVIEW(dev)) | 262 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
260 | return; | 263 | return; |
261 | 264 | ||
262 | dev_priv->mchbar_need_disable = false; | 265 | dev_priv->mchbar_need_disable = false; |
@@ -367,7 +370,7 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
367 | struct drm_i915_private *dev_priv = dev->dev_private; | 370 | struct drm_i915_private *dev_priv = dev->dev_private; |
368 | int ret; | 371 | int ret; |
369 | 372 | ||
370 | ret = intel_parse_bios(dev); | 373 | ret = intel_bios_init(dev_priv); |
371 | if (ret) | 374 | if (ret) |
372 | DRM_INFO("failed to find VBIOS tables\n"); | 375 | DRM_INFO("failed to find VBIOS tables\n"); |
373 | 376 | ||
@@ -779,7 +782,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) | |||
779 | info->num_sprites[PIPE_A] = 2; | 782 | info->num_sprites[PIPE_A] = 2; |
780 | info->num_sprites[PIPE_B] = 2; | 783 | info->num_sprites[PIPE_B] = 2; |
781 | info->num_sprites[PIPE_C] = 1; | 784 | info->num_sprites[PIPE_C] = 1; |
782 | } else if (IS_VALLEYVIEW(dev)) | 785 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
783 | for_each_pipe(dev_priv, pipe) | 786 | for_each_pipe(dev_priv, pipe) |
784 | info->num_sprites[pipe] = 2; | 787 | info->num_sprites[pipe] = 2; |
785 | else | 788 | else |
@@ -791,7 +794,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) | |||
791 | info->num_pipes = 0; | 794 | info->num_pipes = 0; |
792 | } else if (info->num_pipes > 0 && | 795 | } else if (info->num_pipes > 0 && |
793 | (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && | 796 | (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && |
794 | !IS_VALLEYVIEW(dev)) { | 797 | HAS_PCH_SPLIT(dev)) { |
795 | u32 fuse_strap = I915_READ(FUSE_STRAP); | 798 | u32 fuse_strap = I915_READ(FUSE_STRAP); |
796 | u32 sfuse_strap = I915_READ(SFUSE_STRAP); | 799 | u32 sfuse_strap = I915_READ(SFUSE_STRAP); |
797 | 800 | ||
@@ -836,9 +839,6 @@ static void intel_device_info_runtime_init(struct drm_device *dev) | |||
836 | 839 | ||
837 | static void intel_init_dpio(struct drm_i915_private *dev_priv) | 840 | static void intel_init_dpio(struct drm_i915_private *dev_priv) |
838 | { | 841 | { |
839 | if (!IS_VALLEYVIEW(dev_priv)) | ||
840 | return; | ||
841 | |||
842 | /* | 842 | /* |
843 | * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), | 843 | * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), |
844 | * CHV x1 PHY (DP/HDMI D) | 844 | * CHV x1 PHY (DP/HDMI D) |
@@ -847,7 +847,7 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv) | |||
847 | if (IS_CHERRYVIEW(dev_priv)) { | 847 | if (IS_CHERRYVIEW(dev_priv)) { |
848 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; | 848 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; |
849 | DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; | 849 | DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; |
850 | } else { | 850 | } else if (IS_VALLEYVIEW(dev_priv)) { |
851 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; | 851 | DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; |
852 | } | 852 | } |
853 | } | 853 | } |
@@ -896,6 +896,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
896 | 896 | ||
897 | intel_pm_setup(dev); | 897 | intel_pm_setup(dev); |
898 | 898 | ||
899 | intel_runtime_pm_get(dev_priv); | ||
900 | |||
899 | intel_display_crc_init(dev); | 901 | intel_display_crc_init(dev); |
900 | 902 | ||
901 | i915_dump_device_info(dev_priv); | 903 | i915_dump_device_info(dev_priv); |
@@ -1085,6 +1087,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1085 | 1087 | ||
1086 | i915_audio_component_init(dev_priv); | 1088 | i915_audio_component_init(dev_priv); |
1087 | 1089 | ||
1090 | intel_runtime_pm_put(dev_priv); | ||
1091 | |||
1088 | return 0; | 1092 | return 0; |
1089 | 1093 | ||
1090 | out_power_well: | 1094 | out_power_well: |
@@ -1120,6 +1124,9 @@ free_priv: | |||
1120 | kmem_cache_destroy(dev_priv->requests); | 1124 | kmem_cache_destroy(dev_priv->requests); |
1121 | kmem_cache_destroy(dev_priv->vmas); | 1125 | kmem_cache_destroy(dev_priv->vmas); |
1122 | kmem_cache_destroy(dev_priv->objects); | 1126 | kmem_cache_destroy(dev_priv->objects); |
1127 | |||
1128 | intel_runtime_pm_put(dev_priv); | ||
1129 | |||
1123 | kfree(dev_priv); | 1130 | kfree(dev_priv); |
1124 | return ret; | 1131 | return ret; |
1125 | } | 1132 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e6935f1cb689..3ac616d7363b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -311,7 +311,7 @@ static const struct intel_device_info intel_cherryview_info = { | |||
311 | .gen = 8, .num_pipes = 3, | 311 | .gen = 8, .num_pipes = 3, |
312 | .need_gfx_hws = 1, .has_hotplug = 1, | 312 | .need_gfx_hws = 1, .has_hotplug = 1, |
313 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | 313 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
314 | .is_valleyview = 1, | 314 | .is_cherryview = 1, |
315 | .display_mmio_offset = VLV_DISPLAY_BASE, | 315 | .display_mmio_offset = VLV_DISPLAY_BASE, |
316 | GEN_CHV_PIPEOFFSETS, | 316 | GEN_CHV_PIPEOFFSETS, |
317 | CURSOR_OFFSETS, | 317 | CURSOR_OFFSETS, |
@@ -543,15 +543,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) | |||
543 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) | 543 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) |
544 | { | 544 | { |
545 | struct drm_device *dev = dev_priv->dev; | 545 | struct drm_device *dev = dev_priv->dev; |
546 | struct drm_encoder *encoder; | 546 | struct intel_encoder *encoder; |
547 | 547 | ||
548 | drm_modeset_lock_all(dev); | 548 | drm_modeset_lock_all(dev); |
549 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 549 | for_each_intel_encoder(dev, encoder) |
550 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | 550 | if (encoder->suspend) |
551 | 551 | encoder->suspend(encoder); | |
552 | if (intel_encoder->suspend) | ||
553 | intel_encoder->suspend(intel_encoder); | ||
554 | } | ||
555 | drm_modeset_unlock_all(dev); | 552 | drm_modeset_unlock_all(dev); |
556 | } | 553 | } |
557 | 554 | ||
@@ -580,6 +577,8 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
580 | dev_priv->modeset_restore = MODESET_SUSPENDED; | 577 | dev_priv->modeset_restore = MODESET_SUSPENDED; |
581 | mutex_unlock(&dev_priv->modeset_restore_lock); | 578 | mutex_unlock(&dev_priv->modeset_restore_lock); |
582 | 579 | ||
580 | disable_rpm_wakeref_asserts(dev_priv); | ||
581 | |||
583 | /* We do a lot of poking in a lot of registers, make sure they work | 582 | /* We do a lot of poking in a lot of registers, make sure they work |
584 | * properly. */ | 583 | * properly. */ |
585 | intel_display_set_init_power(dev_priv, true); | 584 | intel_display_set_init_power(dev_priv, true); |
@@ -592,7 +591,7 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
592 | if (error) { | 591 | if (error) { |
593 | dev_err(&dev->pdev->dev, | 592 | dev_err(&dev->pdev->dev, |
594 | "GEM idle failed, resume might fail\n"); | 593 | "GEM idle failed, resume might fail\n"); |
595 | return error; | 594 | goto out; |
596 | } | 595 | } |
597 | 596 | ||
598 | intel_guc_suspend(dev); | 597 | intel_guc_suspend(dev); |
@@ -635,7 +634,10 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
635 | if (HAS_CSR(dev_priv)) | 634 | if (HAS_CSR(dev_priv)) |
636 | flush_work(&dev_priv->csr.work); | 635 | flush_work(&dev_priv->csr.work); |
637 | 636 | ||
638 | return 0; | 637 | out: |
638 | enable_rpm_wakeref_asserts(dev_priv); | ||
639 | |||
640 | return error; | ||
639 | } | 641 | } |
640 | 642 | ||
641 | static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) | 643 | static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) |
@@ -644,6 +646,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) | |||
644 | bool fw_csr; | 646 | bool fw_csr; |
645 | int ret; | 647 | int ret; |
646 | 648 | ||
649 | disable_rpm_wakeref_asserts(dev_priv); | ||
650 | |||
647 | fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; | 651 | fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; |
648 | /* | 652 | /* |
649 | * In case of firmware assisted context save/restore don't manually | 653 | * In case of firmware assisted context save/restore don't manually |
@@ -662,7 +666,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) | |||
662 | if (!fw_csr) | 666 | if (!fw_csr) |
663 | intel_power_domains_init_hw(dev_priv, true); | 667 | intel_power_domains_init_hw(dev_priv, true); |
664 | 668 | ||
665 | return ret; | 669 | goto out; |
666 | } | 670 | } |
667 | 671 | ||
668 | pci_disable_device(drm_dev->pdev); | 672 | pci_disable_device(drm_dev->pdev); |
@@ -683,7 +687,10 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) | |||
683 | 687 | ||
684 | dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); | 688 | dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); |
685 | 689 | ||
686 | return 0; | 690 | out: |
691 | enable_rpm_wakeref_asserts(dev_priv); | ||
692 | |||
693 | return ret; | ||
687 | } | 694 | } |
688 | 695 | ||
689 | int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) | 696 | int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) |
@@ -714,6 +721,8 @@ static int i915_drm_resume(struct drm_device *dev) | |||
714 | { | 721 | { |
715 | struct drm_i915_private *dev_priv = dev->dev_private; | 722 | struct drm_i915_private *dev_priv = dev->dev_private; |
716 | 723 | ||
724 | disable_rpm_wakeref_asserts(dev_priv); | ||
725 | |||
717 | mutex_lock(&dev->struct_mutex); | 726 | mutex_lock(&dev->struct_mutex); |
718 | i915_gem_restore_gtt_mappings(dev); | 727 | i915_gem_restore_gtt_mappings(dev); |
719 | mutex_unlock(&dev->struct_mutex); | 728 | mutex_unlock(&dev->struct_mutex); |
@@ -778,6 +787,8 @@ static int i915_drm_resume(struct drm_device *dev) | |||
778 | 787 | ||
779 | drm_kms_helper_poll_enable(dev); | 788 | drm_kms_helper_poll_enable(dev); |
780 | 789 | ||
790 | enable_rpm_wakeref_asserts(dev_priv); | ||
791 | |||
781 | return 0; | 792 | return 0; |
782 | } | 793 | } |
783 | 794 | ||
@@ -802,7 +813,9 @@ static int i915_drm_resume_early(struct drm_device *dev) | |||
802 | 813 | ||
803 | pci_set_master(dev->pdev); | 814 | pci_set_master(dev->pdev); |
804 | 815 | ||
805 | if (IS_VALLEYVIEW(dev_priv)) | 816 | disable_rpm_wakeref_asserts(dev_priv); |
817 | |||
818 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
806 | ret = vlv_resume_prepare(dev_priv, false); | 819 | ret = vlv_resume_prepare(dev_priv, false); |
807 | if (ret) | 820 | if (ret) |
808 | DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", | 821 | DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", |
@@ -823,6 +836,8 @@ static int i915_drm_resume_early(struct drm_device *dev) | |||
823 | out: | 836 | out: |
824 | dev_priv->suspended_to_idle = false; | 837 | dev_priv->suspended_to_idle = false; |
825 | 838 | ||
839 | enable_rpm_wakeref_asserts(dev_priv); | ||
840 | |||
826 | return ret; | 841 | return ret; |
827 | } | 842 | } |
828 | 843 | ||
@@ -1455,6 +1470,9 @@ static int intel_runtime_suspend(struct device *device) | |||
1455 | 1470 | ||
1456 | return -EAGAIN; | 1471 | return -EAGAIN; |
1457 | } | 1472 | } |
1473 | |||
1474 | disable_rpm_wakeref_asserts(dev_priv); | ||
1475 | |||
1458 | /* | 1476 | /* |
1459 | * We are safe here against re-faults, since the fault handler takes | 1477 | * We are safe here against re-faults, since the fault handler takes |
1460 | * an RPM reference. | 1478 | * an RPM reference. |
@@ -1462,6 +1480,8 @@ static int intel_runtime_suspend(struct device *device) | |||
1462 | i915_gem_release_all_mmaps(dev_priv); | 1480 | i915_gem_release_all_mmaps(dev_priv); |
1463 | mutex_unlock(&dev->struct_mutex); | 1481 | mutex_unlock(&dev->struct_mutex); |
1464 | 1482 | ||
1483 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | ||
1484 | |||
1465 | intel_guc_suspend(dev); | 1485 | intel_guc_suspend(dev); |
1466 | 1486 | ||
1467 | intel_suspend_gt_powersave(dev); | 1487 | intel_suspend_gt_powersave(dev); |
@@ -1472,11 +1492,15 @@ static int intel_runtime_suspend(struct device *device) | |||
1472 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); | 1492 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); |
1473 | intel_runtime_pm_enable_interrupts(dev_priv); | 1493 | intel_runtime_pm_enable_interrupts(dev_priv); |
1474 | 1494 | ||
1495 | enable_rpm_wakeref_asserts(dev_priv); | ||
1496 | |||
1475 | return ret; | 1497 | return ret; |
1476 | } | 1498 | } |
1477 | 1499 | ||
1478 | cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); | ||
1479 | intel_uncore_forcewake_reset(dev, false); | 1500 | intel_uncore_forcewake_reset(dev, false); |
1501 | |||
1502 | enable_rpm_wakeref_asserts(dev_priv); | ||
1503 | WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); | ||
1480 | dev_priv->pm.suspended = true; | 1504 | dev_priv->pm.suspended = true; |
1481 | 1505 | ||
1482 | /* | 1506 | /* |
@@ -1520,6 +1544,9 @@ static int intel_runtime_resume(struct device *device) | |||
1520 | 1544 | ||
1521 | DRM_DEBUG_KMS("Resuming device\n"); | 1545 | DRM_DEBUG_KMS("Resuming device\n"); |
1522 | 1546 | ||
1547 | WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); | ||
1548 | disable_rpm_wakeref_asserts(dev_priv); | ||
1549 | |||
1523 | intel_opregion_notify_adapter(dev, PCI_D0); | 1550 | intel_opregion_notify_adapter(dev, PCI_D0); |
1524 | dev_priv->pm.suspended = false; | 1551 | dev_priv->pm.suspended = false; |
1525 | 1552 | ||
@@ -1532,7 +1559,7 @@ static int intel_runtime_resume(struct device *device) | |||
1532 | ret = bxt_resume_prepare(dev_priv); | 1559 | ret = bxt_resume_prepare(dev_priv); |
1533 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 1560 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
1534 | hsw_disable_pc8(dev_priv); | 1561 | hsw_disable_pc8(dev_priv); |
1535 | else if (IS_VALLEYVIEW(dev_priv)) | 1562 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
1536 | ret = vlv_resume_prepare(dev_priv, true); | 1563 | ret = vlv_resume_prepare(dev_priv, true); |
1537 | 1564 | ||
1538 | /* | 1565 | /* |
@@ -1549,11 +1576,13 @@ static int intel_runtime_resume(struct device *device) | |||
1549 | * power well, so hpd is reinitialized from there. For | 1576 | * power well, so hpd is reinitialized from there. For |
1550 | * everyone else do it here. | 1577 | * everyone else do it here. |
1551 | */ | 1578 | */ |
1552 | if (!IS_VALLEYVIEW(dev_priv)) | 1579 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
1553 | intel_hpd_init(dev_priv); | 1580 | intel_hpd_init(dev_priv); |
1554 | 1581 | ||
1555 | intel_enable_gt_powersave(dev); | 1582 | intel_enable_gt_powersave(dev); |
1556 | 1583 | ||
1584 | enable_rpm_wakeref_asserts(dev_priv); | ||
1585 | |||
1557 | if (ret) | 1586 | if (ret) |
1558 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); | 1587 | DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); |
1559 | else | 1588 | else |
@@ -1574,7 +1603,7 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv) | |||
1574 | ret = bxt_suspend_complete(dev_priv); | 1603 | ret = bxt_suspend_complete(dev_priv); |
1575 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 1604 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
1576 | ret = hsw_suspend_complete(dev_priv); | 1605 | ret = hsw_suspend_complete(dev_priv); |
1577 | else if (IS_VALLEYVIEW(dev_priv)) | 1606 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
1578 | ret = vlv_suspend_complete(dev_priv); | 1607 | ret = vlv_suspend_complete(dev_priv); |
1579 | else | 1608 | else |
1580 | ret = 0; | 1609 | ret = 0; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f1a8a53e9e30..f0f75d7c0d94 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <uapi/drm/i915_drm.h> | 33 | #include <uapi/drm/i915_drm.h> |
34 | #include <uapi/drm/drm_fourcc.h> | 34 | #include <uapi/drm/drm_fourcc.h> |
35 | 35 | ||
36 | #include <drm/drmP.h> | ||
36 | #include "i915_reg.h" | 37 | #include "i915_reg.h" |
37 | #include "intel_bios.h" | 38 | #include "intel_bios.h" |
38 | #include "intel_ringbuffer.h" | 39 | #include "intel_ringbuffer.h" |
@@ -57,7 +58,7 @@ | |||
57 | 58 | ||
58 | #define DRIVER_NAME "i915" | 59 | #define DRIVER_NAME "i915" |
59 | #define DRIVER_DESC "Intel Graphics" | 60 | #define DRIVER_DESC "Intel Graphics" |
60 | #define DRIVER_DATE "20151204" | 61 | #define DRIVER_DATE "20151218" |
61 | 62 | ||
62 | #undef WARN_ON | 63 | #undef WARN_ON |
63 | /* Many gcc seem to no see through this and fall over :( */ | 64 | /* Many gcc seem to no see through this and fall over :( */ |
@@ -457,7 +458,9 @@ struct intel_opregion { | |||
457 | u32 swsci_gbda_sub_functions; | 458 | u32 swsci_gbda_sub_functions; |
458 | u32 swsci_sbcb_sub_functions; | 459 | u32 swsci_sbcb_sub_functions; |
459 | struct opregion_asle *asle; | 460 | struct opregion_asle *asle; |
460 | void *vbt; | 461 | void *rvda; |
462 | const void *vbt; | ||
463 | u32 vbt_size; | ||
461 | u32 *lid_state; | 464 | u32 *lid_state; |
462 | struct work_struct asle_work; | 465 | struct work_struct asle_work; |
463 | }; | 466 | }; |
@@ -763,6 +766,7 @@ struct intel_csr { | |||
763 | func(is_crestline) sep \ | 766 | func(is_crestline) sep \ |
764 | func(is_ivybridge) sep \ | 767 | func(is_ivybridge) sep \ |
765 | func(is_valleyview) sep \ | 768 | func(is_valleyview) sep \ |
769 | func(is_cherryview) sep \ | ||
766 | func(is_haswell) sep \ | 770 | func(is_haswell) sep \ |
767 | func(is_skylake) sep \ | 771 | func(is_skylake) sep \ |
768 | func(is_broxton) sep \ | 772 | func(is_broxton) sep \ |
@@ -1601,6 +1605,8 @@ struct skl_wm_level { | |||
1601 | * For more, read the Documentation/power/runtime_pm.txt. | 1605 | * For more, read the Documentation/power/runtime_pm.txt. |
1602 | */ | 1606 | */ |
1603 | struct i915_runtime_pm { | 1607 | struct i915_runtime_pm { |
1608 | atomic_t wakeref_count; | ||
1609 | atomic_t atomic_seq; | ||
1604 | bool suspended; | 1610 | bool suspended; |
1605 | bool irqs_enabled; | 1611 | bool irqs_enabled; |
1606 | }; | 1612 | }; |
@@ -1944,6 +1950,8 @@ struct drm_i915_private { | |||
1944 | /* perform PHY state sanity checks? */ | 1950 | /* perform PHY state sanity checks? */ |
1945 | bool chv_phy_assert[2]; | 1951 | bool chv_phy_assert[2]; |
1946 | 1952 | ||
1953 | struct intel_encoder *dig_port_map[I915_MAX_PORTS]; | ||
1954 | |||
1947 | /* | 1955 | /* |
1948 | * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch | 1956 | * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch |
1949 | * will be rejected. Instead look for a better place. | 1957 | * will be rejected. Instead look for a better place. |
@@ -2181,8 +2189,17 @@ struct drm_i915_gem_request { | |||
2181 | struct drm_i915_private *i915; | 2189 | struct drm_i915_private *i915; |
2182 | struct intel_engine_cs *ring; | 2190 | struct intel_engine_cs *ring; |
2183 | 2191 | ||
2184 | /** GEM sequence number associated with this request. */ | 2192 | /** GEM sequence number associated with the previous request, |
2185 | uint32_t seqno; | 2193 | * when the HWS breadcrumb is equal to this the GPU is processing |
2194 | * this request. | ||
2195 | */ | ||
2196 | u32 previous_seqno; | ||
2197 | |||
2198 | /** GEM sequence number associated with this request, | ||
2199 | * when the HWS breadcrumb is equal or greater than this the GPU | ||
2200 | * has finished processing this request. | ||
2201 | */ | ||
2202 | u32 seqno; | ||
2186 | 2203 | ||
2187 | /** Position in the ringbuffer of the start of the request */ | 2204 | /** Position in the ringbuffer of the start of the request */ |
2188 | u32 head; | 2205 | u32 head; |
@@ -2455,9 +2472,9 @@ struct drm_i915_cmd_table { | |||
2455 | INTEL_DEVID(dev) == 0x0152 || \ | 2472 | INTEL_DEVID(dev) == 0x0152 || \ |
2456 | INTEL_DEVID(dev) == 0x015a) | 2473 | INTEL_DEVID(dev) == 0x015a) |
2457 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) | 2474 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
2458 | #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) | 2475 | #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) |
2459 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) | 2476 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
2460 | #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) | 2477 | #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) |
2461 | #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) | 2478 | #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) |
2462 | #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) | 2479 | #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) |
2463 | #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) | 2480 | #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) |
@@ -2488,6 +2505,14 @@ struct drm_i915_cmd_table { | |||
2488 | #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ | 2505 | #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ |
2489 | INTEL_DEVID(dev) == 0x1915 || \ | 2506 | INTEL_DEVID(dev) == 0x1915 || \ |
2490 | INTEL_DEVID(dev) == 0x191E) | 2507 | INTEL_DEVID(dev) == 0x191E) |
2508 | #define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \ | ||
2509 | INTEL_DEVID(dev) == 0x5913 || \ | ||
2510 | INTEL_DEVID(dev) == 0x5916 || \ | ||
2511 | INTEL_DEVID(dev) == 0x5921 || \ | ||
2512 | INTEL_DEVID(dev) == 0x5926) | ||
2513 | #define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \ | ||
2514 | INTEL_DEVID(dev) == 0x5915 || \ | ||
2515 | INTEL_DEVID(dev) == 0x591E) | ||
2491 | #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ | 2516 | #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ |
2492 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) | 2517 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
2493 | #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ | 2518 | #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ |
@@ -2584,20 +2609,22 @@ struct drm_i915_cmd_table { | |||
2584 | IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) | 2609 | IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) |
2585 | #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ | 2610 | #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ |
2586 | IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ | 2611 | IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ |
2587 | IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) | 2612 | IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ |
2613 | IS_KABYLAKE(dev)) | ||
2588 | #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) | 2614 | #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) |
2589 | #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) | 2615 | #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) |
2590 | 2616 | ||
2591 | #define HAS_CSR(dev) (IS_GEN9(dev)) | 2617 | #define HAS_CSR(dev) (IS_GEN9(dev)) |
2592 | 2618 | ||
2593 | #define HAS_GUC_UCODE(dev) (IS_GEN9(dev)) | 2619 | #define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) |
2594 | #define HAS_GUC_SCHED(dev) (IS_GEN9(dev)) | 2620 | #define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) |
2595 | 2621 | ||
2596 | #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ | 2622 | #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ |
2597 | INTEL_INFO(dev)->gen >= 8) | 2623 | INTEL_INFO(dev)->gen >= 8) |
2598 | 2624 | ||
2599 | #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ | 2625 | #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ |
2600 | !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) | 2626 | !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ |
2627 | !IS_BROXTON(dev)) | ||
2601 | 2628 | ||
2602 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 | 2629 | #define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
2603 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 | 2630 | #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
@@ -2620,7 +2647,8 @@ struct drm_i915_cmd_table { | |||
2620 | #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) | 2647 | #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) |
2621 | #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) | 2648 | #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) |
2622 | 2649 | ||
2623 | #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) | 2650 | #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \ |
2651 | IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) | ||
2624 | 2652 | ||
2625 | /* DPF == dynamic parity feature */ | 2653 | /* DPF == dynamic parity feature */ |
2626 | #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 2654 | #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
@@ -2860,6 +2888,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma); | |||
2860 | #define PIN_UPDATE (1<<5) | 2888 | #define PIN_UPDATE (1<<5) |
2861 | #define PIN_ZONE_4G (1<<6) | 2889 | #define PIN_ZONE_4G (1<<6) |
2862 | #define PIN_HIGH (1<<7) | 2890 | #define PIN_HIGH (1<<7) |
2891 | #define PIN_OFFSET_FIXED (1<<8) | ||
2863 | #define PIN_OFFSET_MASK (~4095) | 2892 | #define PIN_OFFSET_MASK (~4095) |
2864 | int __must_check | 2893 | int __must_check |
2865 | i915_gem_object_pin(struct drm_i915_gem_object *obj, | 2894 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
@@ -2874,6 +2903,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, | |||
2874 | 2903 | ||
2875 | int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, | 2904 | int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, |
2876 | u32 flags); | 2905 | u32 flags); |
2906 | void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); | ||
2877 | int __must_check i915_vma_unbind(struct i915_vma *vma); | 2907 | int __must_check i915_vma_unbind(struct i915_vma *vma); |
2878 | /* | 2908 | /* |
2879 | * BEWARE: Do not use the function below unless you can _absolutely_ | 2909 | * BEWARE: Do not use the function below unless you can _absolutely_ |
@@ -2894,6 +2924,9 @@ static inline int __sg_page_count(struct scatterlist *sg) | |||
2894 | return sg->length >> PAGE_SHIFT; | 2924 | return sg->length >> PAGE_SHIFT; |
2895 | } | 2925 | } |
2896 | 2926 | ||
2927 | struct page * | ||
2928 | i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); | ||
2929 | |||
2897 | static inline struct page * | 2930 | static inline struct page * |
2898 | i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) | 2931 | i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
2899 | { | 2932 | { |
@@ -2945,15 +2978,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) | |||
2945 | return (int32_t)(seq1 - seq2) >= 0; | 2978 | return (int32_t)(seq1 - seq2) >= 0; |
2946 | } | 2979 | } |
2947 | 2980 | ||
2981 | static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, | ||
2982 | bool lazy_coherency) | ||
2983 | { | ||
2984 | u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); | ||
2985 | return i915_seqno_passed(seqno, req->previous_seqno); | ||
2986 | } | ||
2987 | |||
2948 | static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, | 2988 | static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, |
2949 | bool lazy_coherency) | 2989 | bool lazy_coherency) |
2950 | { | 2990 | { |
2951 | u32 seqno; | 2991 | u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); |
2952 | |||
2953 | BUG_ON(req == NULL); | ||
2954 | |||
2955 | seqno = req->ring->get_seqno(req->ring, lazy_coherency); | ||
2956 | |||
2957 | return i915_seqno_passed(seqno, req->seqno); | 2992 | return i915_seqno_passed(seqno, req->seqno); |
2958 | } | 2993 | } |
2959 | 2994 | ||
@@ -3205,6 +3240,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, | |||
3205 | unsigned long start, | 3240 | unsigned long start, |
3206 | unsigned long end, | 3241 | unsigned long end, |
3207 | unsigned flags); | 3242 | unsigned flags); |
3243 | int __must_check i915_gem_evict_for_vma(struct i915_vma *target); | ||
3208 | int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); | 3244 | int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); |
3209 | 3245 | ||
3210 | /* belongs in i915_gem_gtt.h */ | 3246 | /* belongs in i915_gem_gtt.h */ |
@@ -3333,6 +3369,10 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) | |||
3333 | } | 3369 | } |
3334 | extern void intel_i2c_reset(struct drm_device *dev); | 3370 | extern void intel_i2c_reset(struct drm_device *dev); |
3335 | 3371 | ||
3372 | /* intel_bios.c */ | ||
3373 | int intel_bios_init(struct drm_i915_private *dev_priv); | ||
3374 | bool intel_bios_is_valid_vbt(const void *buf, size_t size); | ||
3375 | |||
3336 | /* intel_opregion.c */ | 3376 | /* intel_opregion.c */ |
3337 | #ifdef CONFIG_ACPI | 3377 | #ifdef CONFIG_ACPI |
3338 | extern int intel_opregion_setup(struct drm_device *dev); | 3378 | extern int intel_opregion_setup(struct drm_device *dev); |
@@ -3511,7 +3551,7 @@ __raw_write(64, q) | |||
3511 | 3551 | ||
3512 | static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev) | 3552 | static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev) |
3513 | { | 3553 | { |
3514 | if (IS_VALLEYVIEW(dev)) | 3554 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
3515 | return VLV_VGACNTRL; | 3555 | return VLV_VGACNTRL; |
3516 | else if (INTEL_INFO(dev)->gen >= 5) | 3556 | else if (INTEL_INFO(dev)->gen >= 5) |
3517 | return CPU_VGACNTRL; | 3557 | return CPU_VGACNTRL; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b7d7cecdddf6..6c60e04fc09c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv, | |||
1146 | return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); | 1146 | return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | static int __i915_spin_request(struct drm_i915_gem_request *req) | 1149 | static unsigned long local_clock_us(unsigned *cpu) |
1150 | { | ||
1151 | unsigned long t; | ||
1152 | |||
1153 | /* Cheaply and approximately convert from nanoseconds to microseconds. | ||
1154 | * The result and subsequent calculations are also defined in the same | ||
1155 | * approximate microseconds units. The principal source of timing | ||
1156 | * error here is from the simple truncation. | ||
1157 | * | ||
1158 | * Note that local_clock() is only defined wrt to the current CPU; | ||
1159 | * the comparisons are no longer valid if we switch CPUs. Instead of | ||
1160 | * blocking preemption for the entire busywait, we can detect the CPU | ||
1161 | * switch and use that as indicator of system load and a reason to | ||
1162 | * stop busywaiting, see busywait_stop(). | ||
1163 | */ | ||
1164 | *cpu = get_cpu(); | ||
1165 | t = local_clock() >> 10; | ||
1166 | put_cpu(); | ||
1167 | |||
1168 | return t; | ||
1169 | } | ||
1170 | |||
1171 | static bool busywait_stop(unsigned long timeout, unsigned cpu) | ||
1172 | { | ||
1173 | unsigned this_cpu; | ||
1174 | |||
1175 | if (time_after(local_clock_us(&this_cpu), timeout)) | ||
1176 | return true; | ||
1177 | |||
1178 | return this_cpu != cpu; | ||
1179 | } | ||
1180 | |||
1181 | static int __i915_spin_request(struct drm_i915_gem_request *req, int state) | ||
1150 | { | 1182 | { |
1151 | unsigned long timeout; | 1183 | unsigned long timeout; |
1184 | unsigned cpu; | ||
1185 | |||
1186 | /* When waiting for high frequency requests, e.g. during synchronous | ||
1187 | * rendering split between the CPU and GPU, the finite amount of time | ||
1188 | * required to set up the irq and wait upon it limits the response | ||
1189 | * rate. By busywaiting on the request completion for a short while we | ||
1190 | * can service the high frequency waits as quick as possible. However, | ||
1191 | * if it is a slow request, we want to sleep as quickly as possible. | ||
1192 | * The tradeoff between waiting and sleeping is roughly the time it | ||
1193 | * takes to sleep on a request, on the order of a microsecond. | ||
1194 | */ | ||
1152 | 1195 | ||
1153 | if (i915_gem_request_get_ring(req)->irq_refcount) | 1196 | if (req->ring->irq_refcount) |
1154 | return -EBUSY; | 1197 | return -EBUSY; |
1155 | 1198 | ||
1156 | timeout = jiffies + 1; | 1199 | /* Only spin if we know the GPU is processing this request */ |
1200 | if (!i915_gem_request_started(req, true)) | ||
1201 | return -EAGAIN; | ||
1202 | |||
1203 | timeout = local_clock_us(&cpu) + 5; | ||
1157 | while (!need_resched()) { | 1204 | while (!need_resched()) { |
1158 | if (i915_gem_request_completed(req, true)) | 1205 | if (i915_gem_request_completed(req, true)) |
1159 | return 0; | 1206 | return 0; |
1160 | 1207 | ||
1161 | if (time_after_eq(jiffies, timeout)) | 1208 | if (signal_pending_state(state, current)) |
1209 | break; | ||
1210 | |||
1211 | if (busywait_stop(timeout, cpu)) | ||
1162 | break; | 1212 | break; |
1163 | 1213 | ||
1164 | cpu_relax_lowlatency(); | 1214 | cpu_relax_lowlatency(); |
1165 | } | 1215 | } |
1216 | |||
1166 | if (i915_gem_request_completed(req, false)) | 1217 | if (i915_gem_request_completed(req, false)) |
1167 | return 0; | 1218 | return 0; |
1168 | 1219 | ||
@@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1197 | struct drm_i915_private *dev_priv = dev->dev_private; | 1248 | struct drm_i915_private *dev_priv = dev->dev_private; |
1198 | const bool irq_test_in_progress = | 1249 | const bool irq_test_in_progress = |
1199 | ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); | 1250 | ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); |
1251 | int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; | ||
1200 | DEFINE_WAIT(wait); | 1252 | DEFINE_WAIT(wait); |
1201 | unsigned long timeout_expire; | 1253 | unsigned long timeout_expire; |
1202 | s64 before, now; | 1254 | s64 before, now; |
@@ -1229,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1229 | before = ktime_get_raw_ns(); | 1281 | before = ktime_get_raw_ns(); |
1230 | 1282 | ||
1231 | /* Optimistic spin for the next jiffie before touching IRQs */ | 1283 | /* Optimistic spin for the next jiffie before touching IRQs */ |
1232 | ret = __i915_spin_request(req); | 1284 | ret = __i915_spin_request(req, state); |
1233 | if (ret == 0) | 1285 | if (ret == 0) |
1234 | goto out; | 1286 | goto out; |
1235 | 1287 | ||
@@ -1241,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1241 | for (;;) { | 1293 | for (;;) { |
1242 | struct timer_list timer; | 1294 | struct timer_list timer; |
1243 | 1295 | ||
1244 | prepare_to_wait(&ring->irq_queue, &wait, | 1296 | prepare_to_wait(&ring->irq_queue, &wait, state); |
1245 | interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | ||
1246 | 1297 | ||
1247 | /* We need to check whether any gpu reset happened in between | 1298 | /* We need to check whether any gpu reset happened in between |
1248 | * the caller grabbing the seqno and now ... */ | 1299 | * the caller grabbing the seqno and now ... */ |
@@ -1260,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1260 | break; | 1311 | break; |
1261 | } | 1312 | } |
1262 | 1313 | ||
1263 | if (interruptible && signal_pending(current)) { | 1314 | if (signal_pending_state(state, current)) { |
1264 | ret = -ERESTARTSYS; | 1315 | ret = -ERESTARTSYS; |
1265 | break; | 1316 | break; |
1266 | } | 1317 | } |
@@ -2554,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, | |||
2554 | request->batch_obj = obj; | 2605 | request->batch_obj = obj; |
2555 | 2606 | ||
2556 | request->emitted_jiffies = jiffies; | 2607 | request->emitted_jiffies = jiffies; |
2608 | request->previous_seqno = ring->last_submitted_seqno; | ||
2557 | ring->last_submitted_seqno = request->seqno; | 2609 | ring->last_submitted_seqno = request->seqno; |
2558 | list_add_tail(&request->list, &ring->request_list); | 2610 | list_add_tail(&request->list, &ring->request_list); |
2559 | 2611 | ||
@@ -2765,20 +2817,13 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | |||
2765 | 2817 | ||
2766 | if (i915.enable_execlists) { | 2818 | if (i915.enable_execlists) { |
2767 | spin_lock_irq(&ring->execlist_lock); | 2819 | spin_lock_irq(&ring->execlist_lock); |
2768 | while (!list_empty(&ring->execlist_queue)) { | ||
2769 | struct drm_i915_gem_request *submit_req; | ||
2770 | 2820 | ||
2771 | submit_req = list_first_entry(&ring->execlist_queue, | 2821 | /* list_splice_tail_init checks for empty lists */ |
2772 | struct drm_i915_gem_request, | 2822 | list_splice_tail_init(&ring->execlist_queue, |
2773 | execlist_link); | 2823 | &ring->execlist_retired_req_list); |
2774 | list_del(&submit_req->execlist_link); | ||
2775 | 2824 | ||
2776 | if (submit_req->ctx != ring->default_context) | ||
2777 | intel_lr_context_unpin(submit_req); | ||
2778 | |||
2779 | i915_gem_request_unreference(submit_req); | ||
2780 | } | ||
2781 | spin_unlock_irq(&ring->execlist_lock); | 2825 | spin_unlock_irq(&ring->execlist_lock); |
2826 | intel_execlists_retire_requests(ring); | ||
2782 | } | 2827 | } |
2783 | 2828 | ||
2784 | /* | 2829 | /* |
@@ -3480,30 +3525,50 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | |||
3480 | if (IS_ERR(vma)) | 3525 | if (IS_ERR(vma)) |
3481 | goto err_unpin; | 3526 | goto err_unpin; |
3482 | 3527 | ||
3483 | if (flags & PIN_HIGH) { | 3528 | if (flags & PIN_OFFSET_FIXED) { |
3484 | search_flag = DRM_MM_SEARCH_BELOW; | 3529 | uint64_t offset = flags & PIN_OFFSET_MASK; |
3485 | alloc_flag = DRM_MM_CREATE_TOP; | 3530 | |
3531 | if (offset & (alignment - 1) || offset + size > end) { | ||
3532 | ret = -EINVAL; | ||
3533 | goto err_free_vma; | ||
3534 | } | ||
3535 | vma->node.start = offset; | ||
3536 | vma->node.size = size; | ||
3537 | vma->node.color = obj->cache_level; | ||
3538 | ret = drm_mm_reserve_node(&vm->mm, &vma->node); | ||
3539 | if (ret) { | ||
3540 | ret = i915_gem_evict_for_vma(vma); | ||
3541 | if (ret == 0) | ||
3542 | ret = drm_mm_reserve_node(&vm->mm, &vma->node); | ||
3543 | } | ||
3544 | if (ret) | ||
3545 | goto err_free_vma; | ||
3486 | } else { | 3546 | } else { |
3487 | search_flag = DRM_MM_SEARCH_DEFAULT; | 3547 | if (flags & PIN_HIGH) { |
3488 | alloc_flag = DRM_MM_CREATE_DEFAULT; | 3548 | search_flag = DRM_MM_SEARCH_BELOW; |
3489 | } | 3549 | alloc_flag = DRM_MM_CREATE_TOP; |
3550 | } else { | ||
3551 | search_flag = DRM_MM_SEARCH_DEFAULT; | ||
3552 | alloc_flag = DRM_MM_CREATE_DEFAULT; | ||
3553 | } | ||
3490 | 3554 | ||
3491 | search_free: | 3555 | search_free: |
3492 | ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, | 3556 | ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, |
3493 | size, alignment, | 3557 | size, alignment, |
3494 | obj->cache_level, | 3558 | obj->cache_level, |
3495 | start, end, | 3559 | start, end, |
3496 | search_flag, | 3560 | search_flag, |
3497 | alloc_flag); | 3561 | alloc_flag); |
3498 | if (ret) { | 3562 | if (ret) { |
3499 | ret = i915_gem_evict_something(dev, vm, size, alignment, | 3563 | ret = i915_gem_evict_something(dev, vm, size, alignment, |
3500 | obj->cache_level, | 3564 | obj->cache_level, |
3501 | start, end, | 3565 | start, end, |
3502 | flags); | 3566 | flags); |
3503 | if (ret == 0) | 3567 | if (ret == 0) |
3504 | goto search_free; | 3568 | goto search_free; |
3505 | 3569 | ||
3506 | goto err_free_vma; | 3570 | goto err_free_vma; |
3571 | } | ||
3507 | } | 3572 | } |
3508 | if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) { | 3573 | if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) { |
3509 | ret = -EINVAL; | 3574 | ret = -EINVAL; |
@@ -4094,9 +4159,36 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) | |||
4094 | vma->node.start < (flags & PIN_OFFSET_MASK)) | 4159 | vma->node.start < (flags & PIN_OFFSET_MASK)) |
4095 | return true; | 4160 | return true; |
4096 | 4161 | ||
4162 | if (flags & PIN_OFFSET_FIXED && | ||
4163 | vma->node.start != (flags & PIN_OFFSET_MASK)) | ||
4164 | return true; | ||
4165 | |||
4097 | return false; | 4166 | return false; |
4098 | } | 4167 | } |
4099 | 4168 | ||
4169 | void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) | ||
4170 | { | ||
4171 | struct drm_i915_gem_object *obj = vma->obj; | ||
4172 | bool mappable, fenceable; | ||
4173 | u32 fence_size, fence_alignment; | ||
4174 | |||
4175 | fence_size = i915_gem_get_gtt_size(obj->base.dev, | ||
4176 | obj->base.size, | ||
4177 | obj->tiling_mode); | ||
4178 | fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, | ||
4179 | obj->base.size, | ||
4180 | obj->tiling_mode, | ||
4181 | true); | ||
4182 | |||
4183 | fenceable = (vma->node.size == fence_size && | ||
4184 | (vma->node.start & (fence_alignment - 1)) == 0); | ||
4185 | |||
4186 | mappable = (vma->node.start + fence_size <= | ||
4187 | to_i915(obj->base.dev)->gtt.mappable_end); | ||
4188 | |||
4189 | obj->map_and_fenceable = mappable && fenceable; | ||
4190 | } | ||
4191 | |||
4100 | static int | 4192 | static int |
4101 | i915_gem_object_do_pin(struct drm_i915_gem_object *obj, | 4193 | i915_gem_object_do_pin(struct drm_i915_gem_object *obj, |
4102 | struct i915_address_space *vm, | 4194 | struct i915_address_space *vm, |
@@ -4164,25 +4256,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, | |||
4164 | 4256 | ||
4165 | if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && | 4257 | if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && |
4166 | (bound ^ vma->bound) & GLOBAL_BIND) { | 4258 | (bound ^ vma->bound) & GLOBAL_BIND) { |
4167 | bool mappable, fenceable; | 4259 | __i915_vma_set_map_and_fenceable(vma); |
4168 | u32 fence_size, fence_alignment; | ||
4169 | |||
4170 | fence_size = i915_gem_get_gtt_size(obj->base.dev, | ||
4171 | obj->base.size, | ||
4172 | obj->tiling_mode); | ||
4173 | fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, | ||
4174 | obj->base.size, | ||
4175 | obj->tiling_mode, | ||
4176 | true); | ||
4177 | |||
4178 | fenceable = (vma->node.size == fence_size && | ||
4179 | (vma->node.start & (fence_alignment - 1)) == 0); | ||
4180 | |||
4181 | mappable = (vma->node.start + fence_size <= | ||
4182 | dev_priv->gtt.mappable_end); | ||
4183 | |||
4184 | obj->map_and_fenceable = mappable && fenceable; | ||
4185 | |||
4186 | WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); | 4260 | WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); |
4187 | } | 4261 | } |
4188 | 4262 | ||
@@ -4842,14 +4916,6 @@ int i915_gem_init(struct drm_device *dev) | |||
4842 | 4916 | ||
4843 | mutex_lock(&dev->struct_mutex); | 4917 | mutex_lock(&dev->struct_mutex); |
4844 | 4918 | ||
4845 | if (IS_VALLEYVIEW(dev)) { | ||
4846 | /* VLVA0 (potential hack), BIOS isn't actually waking us */ | ||
4847 | I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ); | ||
4848 | if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & | ||
4849 | VLV_GTLC_ALLOWWAKEACK), 10)) | ||
4850 | DRM_DEBUG_DRIVER("allow wake ack timed out\n"); | ||
4851 | } | ||
4852 | |||
4853 | if (!i915.enable_execlists) { | 4919 | if (!i915.enable_execlists) { |
4854 | dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission; | 4920 | dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission; |
4855 | dev_priv->gt.init_rings = i915_gem_init_rings; | 4921 | dev_priv->gt.init_rings = i915_gem_init_rings; |
@@ -4967,7 +5033,7 @@ i915_gem_load(struct drm_device *dev) | |||
4967 | 5033 | ||
4968 | dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; | 5034 | dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; |
4969 | 5035 | ||
4970 | if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) | 5036 | if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) |
4971 | dev_priv->num_fence_regs = 32; | 5037 | dev_priv->num_fence_regs = 32; |
4972 | else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 5038 | else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
4973 | dev_priv->num_fence_regs = 16; | 5039 | dev_priv->num_fence_regs = 16; |
@@ -5188,6 +5254,21 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) | |||
5188 | return false; | 5254 | return false; |
5189 | } | 5255 | } |
5190 | 5256 | ||
5257 | /* Like i915_gem_object_get_page(), but mark the returned page dirty */ | ||
5258 | struct page * | ||
5259 | i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) | ||
5260 | { | ||
5261 | struct page *page; | ||
5262 | |||
5263 | /* Only default objects have per-page dirty tracking */ | ||
5264 | if (WARN_ON(obj->ops != &i915_gem_object_ops)) | ||
5265 | return NULL; | ||
5266 | |||
5267 | page = i915_gem_object_get_page(obj, n); | ||
5268 | set_page_dirty(page); | ||
5269 | return page; | ||
5270 | } | ||
5271 | |||
5191 | /* Allocate a new GEM object and fill it with the supplied data */ | 5272 | /* Allocate a new GEM object and fill it with the supplied data */ |
5192 | struct drm_i915_gem_object * | 5273 | struct drm_i915_gem_object * |
5193 | i915_gem_object_create_from_data(struct drm_device *dev, | 5274 | i915_gem_object_create_from_data(struct drm_device *dev, |
@@ -5213,6 +5294,7 @@ i915_gem_object_create_from_data(struct drm_device *dev, | |||
5213 | i915_gem_object_pin_pages(obj); | 5294 | i915_gem_object_pin_pages(obj); |
5214 | sg = obj->pages; | 5295 | sg = obj->pages; |
5215 | bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size); | 5296 | bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size); |
5297 | obj->dirty = 1; /* Backing store is now out of date */ | ||
5216 | i915_gem_object_unpin_pages(obj); | 5298 | i915_gem_object_unpin_pages(obj); |
5217 | 5299 | ||
5218 | if (WARN_ON(bytes != size)) { | 5300 | if (WARN_ON(bytes != size)) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 43761c5bcaca..900ffd044db8 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -189,8 +189,15 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) | |||
189 | * shouldn't touch the cache level, especially as that | 189 | * shouldn't touch the cache level, especially as that |
190 | * would make the object snooped which might have a | 190 | * would make the object snooped which might have a |
191 | * negative performance impact. | 191 | * negative performance impact. |
192 | * | ||
193 | * Snooping is required on non-llc platforms in execlist | ||
194 | * mode, but since all GGTT accesses use PAT entry 0 we | ||
195 | * get snooping anyway regardless of cache_level. | ||
196 | * | ||
197 | * This is only applicable for Ivy Bridge devices since | ||
198 | * later platforms don't have L3 control bits in the PTE. | ||
192 | */ | 199 | */ |
193 | if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) { | 200 | if (IS_IVYBRIDGE(dev)) { |
194 | ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); | 201 | ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); |
195 | /* Failure shouldn't ever happen this early */ | 202 | /* Failure shouldn't ever happen this early */ |
196 | if (WARN_ON(ret)) { | 203 | if (WARN_ON(ret)) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index d71a133ceff5..07c6e4d320c9 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -199,6 +199,45 @@ found: | |||
199 | return ret; | 199 | return ret; |
200 | } | 200 | } |
201 | 201 | ||
202 | int | ||
203 | i915_gem_evict_for_vma(struct i915_vma *target) | ||
204 | { | ||
205 | struct drm_mm_node *node, *next; | ||
206 | |||
207 | list_for_each_entry_safe(node, next, | ||
208 | &target->vm->mm.head_node.node_list, | ||
209 | node_list) { | ||
210 | struct i915_vma *vma; | ||
211 | int ret; | ||
212 | |||
213 | if (node->start + node->size <= target->node.start) | ||
214 | continue; | ||
215 | if (node->start >= target->node.start + target->node.size) | ||
216 | break; | ||
217 | |||
218 | vma = container_of(node, typeof(*vma), node); | ||
219 | |||
220 | if (vma->pin_count) { | ||
221 | if (!vma->exec_entry || (vma->pin_count > 1)) | ||
222 | /* Object is pinned for some other use */ | ||
223 | return -EBUSY; | ||
224 | |||
225 | /* We need to evict a buffer in the same batch */ | ||
226 | if (vma->exec_entry->flags & EXEC_OBJECT_PINNED) | ||
227 | /* Overlapping fixed objects in the same batch */ | ||
228 | return -EINVAL; | ||
229 | |||
230 | return -ENOSPC; | ||
231 | } | ||
232 | |||
233 | ret = i915_vma_unbind(vma); | ||
234 | if (ret) | ||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
202 | /** | 241 | /** |
203 | * i915_gem_evict_vm - Evict all idle vmas from a vm | 242 | * i915_gem_evict_vm - Evict all idle vmas from a vm |
204 | * @vm: Address space to cleanse | 243 | * @vm: Address space to cleanse |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a4c243cec4aa..5d01ea680dc1 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -264,7 +264,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, | |||
264 | if (ret) | 264 | if (ret) |
265 | return ret; | 265 | return ret; |
266 | 266 | ||
267 | vaddr = kmap_atomic(i915_gem_object_get_page(obj, | 267 | vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, |
268 | reloc->offset >> PAGE_SHIFT)); | 268 | reloc->offset >> PAGE_SHIFT)); |
269 | *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); | 269 | *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); |
270 | 270 | ||
@@ -273,7 +273,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, | |||
273 | 273 | ||
274 | if (page_offset == 0) { | 274 | if (page_offset == 0) { |
275 | kunmap_atomic(vaddr); | 275 | kunmap_atomic(vaddr); |
276 | vaddr = kmap_atomic(i915_gem_object_get_page(obj, | 276 | vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, |
277 | (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); | 277 | (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); |
278 | } | 278 | } |
279 | 279 | ||
@@ -355,7 +355,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj, | |||
355 | if (ret) | 355 | if (ret) |
356 | return ret; | 356 | return ret; |
357 | 357 | ||
358 | vaddr = kmap_atomic(i915_gem_object_get_page(obj, | 358 | vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, |
359 | reloc->offset >> PAGE_SHIFT)); | 359 | reloc->offset >> PAGE_SHIFT)); |
360 | clflush_write32(vaddr + page_offset, lower_32_bits(delta)); | 360 | clflush_write32(vaddr + page_offset, lower_32_bits(delta)); |
361 | 361 | ||
@@ -364,7 +364,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj, | |||
364 | 364 | ||
365 | if (page_offset == 0) { | 365 | if (page_offset == 0) { |
366 | kunmap_atomic(vaddr); | 366 | kunmap_atomic(vaddr); |
367 | vaddr = kmap_atomic(i915_gem_object_get_page(obj, | 367 | vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, |
368 | (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); | 368 | (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); |
369 | } | 369 | } |
370 | 370 | ||
@@ -599,6 +599,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, | |||
599 | flags |= PIN_GLOBAL | PIN_MAPPABLE; | 599 | flags |= PIN_GLOBAL | PIN_MAPPABLE; |
600 | if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) | 600 | if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) |
601 | flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; | 601 | flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; |
602 | if (entry->flags & EXEC_OBJECT_PINNED) | ||
603 | flags |= entry->offset | PIN_OFFSET_FIXED; | ||
602 | if ((flags & PIN_MAPPABLE) == 0) | 604 | if ((flags & PIN_MAPPABLE) == 0) |
603 | flags |= PIN_HIGH; | 605 | flags |= PIN_HIGH; |
604 | } | 606 | } |
@@ -670,6 +672,10 @@ eb_vma_misplaced(struct i915_vma *vma) | |||
670 | vma->node.start & (entry->alignment - 1)) | 672 | vma->node.start & (entry->alignment - 1)) |
671 | return true; | 673 | return true; |
672 | 674 | ||
675 | if (entry->flags & EXEC_OBJECT_PINNED && | ||
676 | vma->node.start != entry->offset) | ||
677 | return true; | ||
678 | |||
673 | if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && | 679 | if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && |
674 | vma->node.start < BATCH_OFFSET_BIAS) | 680 | vma->node.start < BATCH_OFFSET_BIAS) |
675 | return true; | 681 | return true; |
@@ -695,6 +701,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, | |||
695 | struct i915_vma *vma; | 701 | struct i915_vma *vma; |
696 | struct i915_address_space *vm; | 702 | struct i915_address_space *vm; |
697 | struct list_head ordered_vmas; | 703 | struct list_head ordered_vmas; |
704 | struct list_head pinned_vmas; | ||
698 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; | 705 | bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; |
699 | int retry; | 706 | int retry; |
700 | 707 | ||
@@ -703,6 +710,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, | |||
703 | vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; | 710 | vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; |
704 | 711 | ||
705 | INIT_LIST_HEAD(&ordered_vmas); | 712 | INIT_LIST_HEAD(&ordered_vmas); |
713 | INIT_LIST_HEAD(&pinned_vmas); | ||
706 | while (!list_empty(vmas)) { | 714 | while (!list_empty(vmas)) { |
707 | struct drm_i915_gem_exec_object2 *entry; | 715 | struct drm_i915_gem_exec_object2 *entry; |
708 | bool need_fence, need_mappable; | 716 | bool need_fence, need_mappable; |
@@ -721,7 +729,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, | |||
721 | obj->tiling_mode != I915_TILING_NONE; | 729 | obj->tiling_mode != I915_TILING_NONE; |
722 | need_mappable = need_fence || need_reloc_mappable(vma); | 730 | need_mappable = need_fence || need_reloc_mappable(vma); |
723 | 731 | ||
724 | if (need_mappable) { | 732 | if (entry->flags & EXEC_OBJECT_PINNED) |
733 | list_move_tail(&vma->exec_list, &pinned_vmas); | ||
734 | else if (need_mappable) { | ||
725 | entry->flags |= __EXEC_OBJECT_NEEDS_MAP; | 735 | entry->flags |= __EXEC_OBJECT_NEEDS_MAP; |
726 | list_move(&vma->exec_list, &ordered_vmas); | 736 | list_move(&vma->exec_list, &ordered_vmas); |
727 | } else | 737 | } else |
@@ -731,6 +741,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, | |||
731 | obj->base.pending_write_domain = 0; | 741 | obj->base.pending_write_domain = 0; |
732 | } | 742 | } |
733 | list_splice(&ordered_vmas, vmas); | 743 | list_splice(&ordered_vmas, vmas); |
744 | list_splice(&pinned_vmas, vmas); | ||
734 | 745 | ||
735 | /* Attempt to pin all of the buffers into the GTT. | 746 | /* Attempt to pin all of the buffers into the GTT. |
736 | * This is done in 3 phases: | 747 | * This is done in 3 phases: |
@@ -1317,7 +1328,8 @@ eb_get_batch(struct eb_vmas *eb) | |||
1317 | * Note that actual hangs have only been observed on gen7, but for | 1328 | * Note that actual hangs have only been observed on gen7, but for |
1318 | * paranoia do it everywhere. | 1329 | * paranoia do it everywhere. |
1319 | */ | 1330 | */ |
1320 | vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; | 1331 | if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0) |
1332 | vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; | ||
1321 | 1333 | ||
1322 | return vma->obj; | 1334 | return vma->obj; |
1323 | } | 1335 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1f7e6b9df45d..52bc6c3dfe04 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -140,8 +140,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) | |||
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | /* Early VLV doesn't have this */ | 142 | /* Early VLV doesn't have this */ |
143 | if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && | 143 | if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { |
144 | dev->pdev->revision < 0xb) { | ||
145 | DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); | 144 | DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); |
146 | return 0; | 145 | return 0; |
147 | } | 146 | } |
@@ -770,10 +769,10 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, | |||
770 | gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, | 769 | gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, |
771 | scratch_pte); | 770 | scratch_pte); |
772 | } else { | 771 | } else { |
773 | uint64_t templ4, pml4e; | 772 | uint64_t pml4e; |
774 | struct i915_page_directory_pointer *pdp; | 773 | struct i915_page_directory_pointer *pdp; |
775 | 774 | ||
776 | gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { | 775 | gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) { |
777 | gen8_ppgtt_clear_pte_range(vm, pdp, start, length, | 776 | gen8_ppgtt_clear_pte_range(vm, pdp, start, length, |
778 | scratch_pte); | 777 | scratch_pte); |
779 | } | 778 | } |
@@ -839,10 +838,10 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, | |||
839 | cache_level); | 838 | cache_level); |
840 | } else { | 839 | } else { |
841 | struct i915_page_directory_pointer *pdp; | 840 | struct i915_page_directory_pointer *pdp; |
842 | uint64_t templ4, pml4e; | 841 | uint64_t pml4e; |
843 | uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT; | 842 | uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT; |
844 | 843 | ||
845 | gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { | 844 | gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) { |
846 | gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, | 845 | gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, |
847 | start, cache_level); | 846 | start, cache_level); |
848 | } | 847 | } |
@@ -1020,10 +1019,9 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, | |||
1020 | { | 1019 | { |
1021 | struct drm_device *dev = vm->dev; | 1020 | struct drm_device *dev = vm->dev; |
1022 | struct i915_page_table *pt; | 1021 | struct i915_page_table *pt; |
1023 | uint64_t temp; | ||
1024 | uint32_t pde; | 1022 | uint32_t pde; |
1025 | 1023 | ||
1026 | gen8_for_each_pde(pt, pd, start, length, temp, pde) { | 1024 | gen8_for_each_pde(pt, pd, start, length, pde) { |
1027 | /* Don't reallocate page tables */ | 1025 | /* Don't reallocate page tables */ |
1028 | if (test_bit(pde, pd->used_pdes)) { | 1026 | if (test_bit(pde, pd->used_pdes)) { |
1029 | /* Scratch is never allocated this way */ | 1027 | /* Scratch is never allocated this way */ |
@@ -1082,13 +1080,12 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, | |||
1082 | { | 1080 | { |
1083 | struct drm_device *dev = vm->dev; | 1081 | struct drm_device *dev = vm->dev; |
1084 | struct i915_page_directory *pd; | 1082 | struct i915_page_directory *pd; |
1085 | uint64_t temp; | ||
1086 | uint32_t pdpe; | 1083 | uint32_t pdpe; |
1087 | uint32_t pdpes = I915_PDPES_PER_PDP(dev); | 1084 | uint32_t pdpes = I915_PDPES_PER_PDP(dev); |
1088 | 1085 | ||
1089 | WARN_ON(!bitmap_empty(new_pds, pdpes)); | 1086 | WARN_ON(!bitmap_empty(new_pds, pdpes)); |
1090 | 1087 | ||
1091 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { | 1088 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
1092 | if (test_bit(pdpe, pdp->used_pdpes)) | 1089 | if (test_bit(pdpe, pdp->used_pdpes)) |
1093 | continue; | 1090 | continue; |
1094 | 1091 | ||
@@ -1136,12 +1133,11 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, | |||
1136 | { | 1133 | { |
1137 | struct drm_device *dev = vm->dev; | 1134 | struct drm_device *dev = vm->dev; |
1138 | struct i915_page_directory_pointer *pdp; | 1135 | struct i915_page_directory_pointer *pdp; |
1139 | uint64_t temp; | ||
1140 | uint32_t pml4e; | 1136 | uint32_t pml4e; |
1141 | 1137 | ||
1142 | WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4)); | 1138 | WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4)); |
1143 | 1139 | ||
1144 | gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) { | 1140 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { |
1145 | if (!test_bit(pml4e, pml4->used_pml4es)) { | 1141 | if (!test_bit(pml4e, pml4->used_pml4es)) { |
1146 | pdp = alloc_pdp(dev); | 1142 | pdp = alloc_pdp(dev); |
1147 | if (IS_ERR(pdp)) | 1143 | if (IS_ERR(pdp)) |
@@ -1225,7 +1221,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, | |||
1225 | struct i915_page_directory *pd; | 1221 | struct i915_page_directory *pd; |
1226 | const uint64_t orig_start = start; | 1222 | const uint64_t orig_start = start; |
1227 | const uint64_t orig_length = length; | 1223 | const uint64_t orig_length = length; |
1228 | uint64_t temp; | ||
1229 | uint32_t pdpe; | 1224 | uint32_t pdpe; |
1230 | uint32_t pdpes = I915_PDPES_PER_PDP(dev); | 1225 | uint32_t pdpes = I915_PDPES_PER_PDP(dev); |
1231 | int ret; | 1226 | int ret; |
@@ -1252,7 +1247,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, | |||
1252 | } | 1247 | } |
1253 | 1248 | ||
1254 | /* For every page directory referenced, allocate page tables */ | 1249 | /* For every page directory referenced, allocate page tables */ |
1255 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { | 1250 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
1256 | ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, | 1251 | ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, |
1257 | new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES)); | 1252 | new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES)); |
1258 | if (ret) | 1253 | if (ret) |
@@ -1264,7 +1259,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, | |||
1264 | 1259 | ||
1265 | /* Allocations have completed successfully, so set the bitmaps, and do | 1260 | /* Allocations have completed successfully, so set the bitmaps, and do |
1266 | * the mappings. */ | 1261 | * the mappings. */ |
1267 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { | 1262 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
1268 | gen8_pde_t *const page_directory = kmap_px(pd); | 1263 | gen8_pde_t *const page_directory = kmap_px(pd); |
1269 | struct i915_page_table *pt; | 1264 | struct i915_page_table *pt; |
1270 | uint64_t pd_len = length; | 1265 | uint64_t pd_len = length; |
@@ -1274,7 +1269,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, | |||
1274 | /* Every pd should be allocated, we just did that above. */ | 1269 | /* Every pd should be allocated, we just did that above. */ |
1275 | WARN_ON(!pd); | 1270 | WARN_ON(!pd); |
1276 | 1271 | ||
1277 | gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) { | 1272 | gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { |
1278 | /* Same reasoning as pd */ | 1273 | /* Same reasoning as pd */ |
1279 | WARN_ON(!pt); | 1274 | WARN_ON(!pt); |
1280 | WARN_ON(!pd_len); | 1275 | WARN_ON(!pd_len); |
@@ -1311,6 +1306,8 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, | |||
1311 | 1306 | ||
1312 | err_out: | 1307 | err_out: |
1313 | while (pdpe--) { | 1308 | while (pdpe--) { |
1309 | unsigned long temp; | ||
1310 | |||
1314 | for_each_set_bit(temp, new_page_tables + pdpe * | 1311 | for_each_set_bit(temp, new_page_tables + pdpe * |
1315 | BITS_TO_LONGS(I915_PDES), I915_PDES) | 1312 | BITS_TO_LONGS(I915_PDES), I915_PDES) |
1316 | free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]); | 1313 | free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]); |
@@ -1333,7 +1330,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, | |||
1333 | struct i915_hw_ppgtt *ppgtt = | 1330 | struct i915_hw_ppgtt *ppgtt = |
1334 | container_of(vm, struct i915_hw_ppgtt, base); | 1331 | container_of(vm, struct i915_hw_ppgtt, base); |
1335 | struct i915_page_directory_pointer *pdp; | 1332 | struct i915_page_directory_pointer *pdp; |
1336 | uint64_t temp, pml4e; | 1333 | uint64_t pml4e; |
1337 | int ret = 0; | 1334 | int ret = 0; |
1338 | 1335 | ||
1339 | /* Do the pml4 allocations first, so we don't need to track the newly | 1336 | /* Do the pml4 allocations first, so we don't need to track the newly |
@@ -1352,7 +1349,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, | |||
1352 | "The allocation has spanned more than 512GB. " | 1349 | "The allocation has spanned more than 512GB. " |
1353 | "It is highly likely this is incorrect."); | 1350 | "It is highly likely this is incorrect."); |
1354 | 1351 | ||
1355 | gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) { | 1352 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { |
1356 | WARN_ON(!pdp); | 1353 | WARN_ON(!pdp); |
1357 | 1354 | ||
1358 | ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length); | 1355 | ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length); |
@@ -1392,10 +1389,9 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp, | |||
1392 | struct seq_file *m) | 1389 | struct seq_file *m) |
1393 | { | 1390 | { |
1394 | struct i915_page_directory *pd; | 1391 | struct i915_page_directory *pd; |
1395 | uint64_t temp; | ||
1396 | uint32_t pdpe; | 1392 | uint32_t pdpe; |
1397 | 1393 | ||
1398 | gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) { | 1394 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
1399 | struct i915_page_table *pt; | 1395 | struct i915_page_table *pt; |
1400 | uint64_t pd_len = length; | 1396 | uint64_t pd_len = length; |
1401 | uint64_t pd_start = start; | 1397 | uint64_t pd_start = start; |
@@ -1405,7 +1401,7 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp, | |||
1405 | continue; | 1401 | continue; |
1406 | 1402 | ||
1407 | seq_printf(m, "\tPDPE #%d\n", pdpe); | 1403 | seq_printf(m, "\tPDPE #%d\n", pdpe); |
1408 | gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) { | 1404 | gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { |
1409 | uint32_t pte; | 1405 | uint32_t pte; |
1410 | gen8_pte_t *pt_vaddr; | 1406 | gen8_pte_t *pt_vaddr; |
1411 | 1407 | ||
@@ -1455,11 +1451,11 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | |||
1455 | if (!USES_FULL_48BIT_PPGTT(vm->dev)) { | 1451 | if (!USES_FULL_48BIT_PPGTT(vm->dev)) { |
1456 | gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); | 1452 | gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); |
1457 | } else { | 1453 | } else { |
1458 | uint64_t templ4, pml4e; | 1454 | uint64_t pml4e; |
1459 | struct i915_pml4 *pml4 = &ppgtt->pml4; | 1455 | struct i915_pml4 *pml4 = &ppgtt->pml4; |
1460 | struct i915_page_directory_pointer *pdp; | 1456 | struct i915_page_directory_pointer *pdp; |
1461 | 1457 | ||
1462 | gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) { | 1458 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { |
1463 | if (!test_bit(pml4e, pml4->used_pml4es)) | 1459 | if (!test_bit(pml4e, pml4->used_pml4es)) |
1464 | continue; | 1460 | continue; |
1465 | 1461 | ||
@@ -2355,6 +2351,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, | |||
2355 | int i = 0; | 2351 | int i = 0; |
2356 | struct sg_page_iter sg_iter; | 2352 | struct sg_page_iter sg_iter; |
2357 | dma_addr_t addr = 0; /* shut up gcc */ | 2353 | dma_addr_t addr = 0; /* shut up gcc */ |
2354 | int rpm_atomic_seq; | ||
2355 | |||
2356 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
2358 | 2357 | ||
2359 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { | 2358 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { |
2360 | addr = sg_dma_address(sg_iter.sg) + | 2359 | addr = sg_dma_address(sg_iter.sg) + |
@@ -2381,6 +2380,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, | |||
2381 | */ | 2380 | */ |
2382 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | 2381 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
2383 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | 2382 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
2383 | |||
2384 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
2384 | } | 2385 | } |
2385 | 2386 | ||
2386 | /* | 2387 | /* |
@@ -2401,6 +2402,9 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, | |||
2401 | int i = 0; | 2402 | int i = 0; |
2402 | struct sg_page_iter sg_iter; | 2403 | struct sg_page_iter sg_iter; |
2403 | dma_addr_t addr = 0; | 2404 | dma_addr_t addr = 0; |
2405 | int rpm_atomic_seq; | ||
2406 | |||
2407 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
2404 | 2408 | ||
2405 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { | 2409 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { |
2406 | addr = sg_page_iter_dma_address(&sg_iter); | 2410 | addr = sg_page_iter_dma_address(&sg_iter); |
@@ -2425,6 +2429,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, | |||
2425 | */ | 2429 | */ |
2426 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | 2430 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
2427 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | 2431 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
2432 | |||
2433 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
2428 | } | 2434 | } |
2429 | 2435 | ||
2430 | static void gen8_ggtt_clear_range(struct i915_address_space *vm, | 2436 | static void gen8_ggtt_clear_range(struct i915_address_space *vm, |
@@ -2439,6 +2445,9 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, | |||
2439 | (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; | 2445 | (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
2440 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; | 2446 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
2441 | int i; | 2447 | int i; |
2448 | int rpm_atomic_seq; | ||
2449 | |||
2450 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
2442 | 2451 | ||
2443 | if (WARN(num_entries > max_entries, | 2452 | if (WARN(num_entries > max_entries, |
2444 | "First entry = %d; Num entries = %d (max=%d)\n", | 2453 | "First entry = %d; Num entries = %d (max=%d)\n", |
@@ -2451,6 +2460,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, | |||
2451 | for (i = 0; i < num_entries; i++) | 2460 | for (i = 0; i < num_entries; i++) |
2452 | gen8_set_pte(>t_base[i], scratch_pte); | 2461 | gen8_set_pte(>t_base[i], scratch_pte); |
2453 | readl(gtt_base); | 2462 | readl(gtt_base); |
2463 | |||
2464 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
2454 | } | 2465 | } |
2455 | 2466 | ||
2456 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, | 2467 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
@@ -2465,6 +2476,9 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, | |||
2465 | (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; | 2476 | (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
2466 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; | 2477 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
2467 | int i; | 2478 | int i; |
2479 | int rpm_atomic_seq; | ||
2480 | |||
2481 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
2468 | 2482 | ||
2469 | if (WARN(num_entries > max_entries, | 2483 | if (WARN(num_entries > max_entries, |
2470 | "First entry = %d; Num entries = %d (max=%d)\n", | 2484 | "First entry = %d; Num entries = %d (max=%d)\n", |
@@ -2477,6 +2491,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, | |||
2477 | for (i = 0; i < num_entries; i++) | 2491 | for (i = 0; i < num_entries; i++) |
2478 | iowrite32(scratch_pte, >t_base[i]); | 2492 | iowrite32(scratch_pte, >t_base[i]); |
2479 | readl(gtt_base); | 2493 | readl(gtt_base); |
2494 | |||
2495 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
2480 | } | 2496 | } |
2481 | 2497 | ||
2482 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, | 2498 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
@@ -2484,11 +2500,17 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm, | |||
2484 | uint64_t start, | 2500 | uint64_t start, |
2485 | enum i915_cache_level cache_level, u32 unused) | 2501 | enum i915_cache_level cache_level, u32 unused) |
2486 | { | 2502 | { |
2503 | struct drm_i915_private *dev_priv = vm->dev->dev_private; | ||
2487 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? | 2504 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
2488 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | 2505 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
2506 | int rpm_atomic_seq; | ||
2507 | |||
2508 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
2489 | 2509 | ||
2490 | intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); | 2510 | intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); |
2491 | 2511 | ||
2512 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
2513 | |||
2492 | } | 2514 | } |
2493 | 2515 | ||
2494 | static void i915_ggtt_clear_range(struct i915_address_space *vm, | 2516 | static void i915_ggtt_clear_range(struct i915_address_space *vm, |
@@ -2496,9 +2518,16 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm, | |||
2496 | uint64_t length, | 2518 | uint64_t length, |
2497 | bool unused) | 2519 | bool unused) |
2498 | { | 2520 | { |
2521 | struct drm_i915_private *dev_priv = vm->dev->dev_private; | ||
2499 | unsigned first_entry = start >> PAGE_SHIFT; | 2522 | unsigned first_entry = start >> PAGE_SHIFT; |
2500 | unsigned num_entries = length >> PAGE_SHIFT; | 2523 | unsigned num_entries = length >> PAGE_SHIFT; |
2524 | int rpm_atomic_seq; | ||
2525 | |||
2526 | rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); | ||
2527 | |||
2501 | intel_gtt_clear_range(first_entry, num_entries); | 2528 | intel_gtt_clear_range(first_entry, num_entries); |
2529 | |||
2530 | assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); | ||
2502 | } | 2531 | } |
2503 | 2532 | ||
2504 | static int ggtt_bind_vma(struct i915_vma *vma, | 2533 | static int ggtt_bind_vma(struct i915_vma *vma, |
@@ -2699,6 +2728,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, | |||
2699 | return ret; | 2728 | return ret; |
2700 | } | 2729 | } |
2701 | vma->bound |= GLOBAL_BIND; | 2730 | vma->bound |= GLOBAL_BIND; |
2731 | __i915_vma_set_map_and_fenceable(vma); | ||
2702 | list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); | 2732 | list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); |
2703 | } | 2733 | } |
2704 | 2734 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 877c32c78a6a..b448ad832dcf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
@@ -455,32 +455,29 @@ static inline uint32_t gen6_pde_index(uint32_t addr) | |||
455 | * between from start until start + length. On gen8+ it simply iterates | 455 | * between from start until start + length. On gen8+ it simply iterates |
456 | * over every page directory entry in a page directory. | 456 | * over every page directory entry in a page directory. |
457 | */ | 457 | */ |
458 | #define gen8_for_each_pde(pt, pd, start, length, temp, iter) \ | 458 | #define gen8_for_each_pde(pt, pd, start, length, iter) \ |
459 | for (iter = gen8_pde_index(start); \ | 459 | for (iter = gen8_pde_index(start); \ |
460 | length > 0 && iter < I915_PDES ? \ | 460 | length > 0 && iter < I915_PDES && \ |
461 | (pt = (pd)->page_table[iter]), 1 : 0; \ | 461 | (pt = (pd)->page_table[iter], true); \ |
462 | iter++, \ | 462 | ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \ |
463 | temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \ | 463 | temp = min(temp - start, length); \ |
464 | temp = min(temp, length), \ | 464 | start += temp, length -= temp; }), ++iter) |
465 | start += temp, length -= temp) | 465 | |
466 | 466 | #define gen8_for_each_pdpe(pd, pdp, start, length, iter) \ | |
467 | #define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \ | 467 | for (iter = gen8_pdpe_index(start); \ |
468 | for (iter = gen8_pdpe_index(start); \ | 468 | length > 0 && iter < I915_PDPES_PER_PDP(dev) && \ |
469 | length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \ | 469 | (pd = (pdp)->page_directory[iter], true); \ |
470 | (pd = (pdp)->page_directory[iter]), 1 : 0; \ | 470 | ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \ |
471 | iter++, \ | 471 | temp = min(temp - start, length); \ |
472 | temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \ | 472 | start += temp, length -= temp; }), ++iter) |
473 | temp = min(temp, length), \ | 473 | |
474 | start += temp, length -= temp) | 474 | #define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \ |
475 | 475 | for (iter = gen8_pml4e_index(start); \ | |
476 | #define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \ | 476 | length > 0 && iter < GEN8_PML4ES_PER_PML4 && \ |
477 | for (iter = gen8_pml4e_index(start); \ | 477 | (pdp = (pml4)->pdps[iter], true); \ |
478 | length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \ | 478 | ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \ |
479 | (pdp = (pml4)->pdps[iter]), 1 : 0; \ | 479 | temp = min(temp - start, length); \ |
480 | iter++, \ | 480 | start += temp, length -= temp; }), ++iter) |
481 | temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \ | ||
482 | temp = min(temp, length), \ | ||
483 | start += temp, length -= temp) | ||
484 | 481 | ||
485 | static inline uint32_t gen8_pte_index(uint64_t address) | 482 | static inline uint32_t gen8_pte_index(uint64_t address) |
486 | { | 483 | { |
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 5026a6267a88..fc7e6d5c6251 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c | |||
@@ -103,7 +103,7 @@ static int render_state_setup(struct render_state *so) | |||
103 | if (ret) | 103 | if (ret) |
104 | return ret; | 104 | return ret; |
105 | 105 | ||
106 | page = sg_page(so->obj->pages->sgl); | 106 | page = i915_gem_object_get_dirty_page(so->obj, 0); |
107 | d = kmap(page); | 107 | d = kmap(page); |
108 | 108 | ||
109 | while (i < rodata->batch_items) { | 109 | while (i < rodata->batch_items) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 598ed2facf85..3476877fc0d6 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -688,6 +688,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
688 | } | 688 | } |
689 | 689 | ||
690 | vma->bound |= GLOBAL_BIND; | 690 | vma->bound |= GLOBAL_BIND; |
691 | __i915_vma_set_map_and_fenceable(vma); | ||
691 | list_add_tail(&vma->mm_list, &ggtt->inactive_list); | 692 | list_add_tail(&vma->mm_list, &ggtt->inactive_list); |
692 | } | 693 | } |
693 | 694 | ||
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 0d23785ba818..05aa7e61cbe0 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c | |||
@@ -568,7 +568,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq) | |||
568 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); | 568 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); |
569 | WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); | 569 | WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); |
570 | 570 | ||
571 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); | 571 | page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); |
572 | reg_state = kmap_atomic(page); | 572 | reg_state = kmap_atomic(page); |
573 | 573 | ||
574 | reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); | 574 | reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e88d692583a5..3f8c753997ba 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -581,7 +581,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, | |||
581 | { | 581 | { |
582 | u32 enable_mask; | 582 | u32 enable_mask; |
583 | 583 | ||
584 | if (IS_VALLEYVIEW(dev_priv->dev)) | 584 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
585 | enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, | 585 | enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, |
586 | status_mask); | 586 | status_mask); |
587 | else | 587 | else |
@@ -595,7 +595,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, | |||
595 | { | 595 | { |
596 | u32 enable_mask; | 596 | u32 enable_mask; |
597 | 597 | ||
598 | if (IS_VALLEYVIEW(dev_priv->dev)) | 598 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
599 | enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, | 599 | enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, |
600 | status_mask); | 600 | status_mask); |
601 | else | 601 | else |
@@ -1103,6 +1103,14 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
1103 | spin_unlock_irq(&dev_priv->irq_lock); | 1103 | spin_unlock_irq(&dev_priv->irq_lock); |
1104 | return; | 1104 | return; |
1105 | } | 1105 | } |
1106 | |||
1107 | /* | ||
1108 | * The RPS work is synced during runtime suspend, we don't require a | ||
1109 | * wakeref. TODO: instead of disabling the asserts make sure that we | ||
1110 | * always hold an RPM reference while the work is running. | ||
1111 | */ | ||
1112 | DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); | ||
1113 | |||
1106 | pm_iir = dev_priv->rps.pm_iir; | 1114 | pm_iir = dev_priv->rps.pm_iir; |
1107 | dev_priv->rps.pm_iir = 0; | 1115 | dev_priv->rps.pm_iir = 0; |
1108 | /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ | 1116 | /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ |
@@ -1115,7 +1123,7 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
1115 | WARN_ON(pm_iir & ~dev_priv->pm_rps_events); | 1123 | WARN_ON(pm_iir & ~dev_priv->pm_rps_events); |
1116 | 1124 | ||
1117 | if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) | 1125 | if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) |
1118 | return; | 1126 | goto out; |
1119 | 1127 | ||
1120 | mutex_lock(&dev_priv->rps.hw_lock); | 1128 | mutex_lock(&dev_priv->rps.hw_lock); |
1121 | 1129 | ||
@@ -1170,6 +1178,8 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
1170 | intel_set_rps(dev_priv->dev, new_delay); | 1178 | intel_set_rps(dev_priv->dev, new_delay); |
1171 | 1179 | ||
1172 | mutex_unlock(&dev_priv->rps.hw_lock); | 1180 | mutex_unlock(&dev_priv->rps.hw_lock); |
1181 | out: | ||
1182 | ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); | ||
1173 | } | 1183 | } |
1174 | 1184 | ||
1175 | 1185 | ||
@@ -1723,7 +1733,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) | |||
1723 | */ | 1733 | */ |
1724 | POSTING_READ(PORT_HOTPLUG_STAT); | 1734 | POSTING_READ(PORT_HOTPLUG_STAT); |
1725 | 1735 | ||
1726 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { | 1736 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1727 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; | 1737 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; |
1728 | 1738 | ||
1729 | if (hotplug_trigger) { | 1739 | if (hotplug_trigger) { |
@@ -1758,6 +1768,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
1758 | if (!intel_irqs_enabled(dev_priv)) | 1768 | if (!intel_irqs_enabled(dev_priv)) |
1759 | return IRQ_NONE; | 1769 | return IRQ_NONE; |
1760 | 1770 | ||
1771 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | ||
1772 | disable_rpm_wakeref_asserts(dev_priv); | ||
1773 | |||
1761 | while (true) { | 1774 | while (true) { |
1762 | /* Find, clear, then process each source of interrupt */ | 1775 | /* Find, clear, then process each source of interrupt */ |
1763 | 1776 | ||
@@ -1792,6 +1805,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
1792 | } | 1805 | } |
1793 | 1806 | ||
1794 | out: | 1807 | out: |
1808 | enable_rpm_wakeref_asserts(dev_priv); | ||
1809 | |||
1795 | return ret; | 1810 | return ret; |
1796 | } | 1811 | } |
1797 | 1812 | ||
@@ -1805,6 +1820,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
1805 | if (!intel_irqs_enabled(dev_priv)) | 1820 | if (!intel_irqs_enabled(dev_priv)) |
1806 | return IRQ_NONE; | 1821 | return IRQ_NONE; |
1807 | 1822 | ||
1823 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | ||
1824 | disable_rpm_wakeref_asserts(dev_priv); | ||
1825 | |||
1808 | for (;;) { | 1826 | for (;;) { |
1809 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; | 1827 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; |
1810 | iir = I915_READ(VLV_IIR); | 1828 | iir = I915_READ(VLV_IIR); |
@@ -1835,6 +1853,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
1835 | POSTING_READ(GEN8_MASTER_IRQ); | 1853 | POSTING_READ(GEN8_MASTER_IRQ); |
1836 | } | 1854 | } |
1837 | 1855 | ||
1856 | enable_rpm_wakeref_asserts(dev_priv); | ||
1857 | |||
1838 | return ret; | 1858 | return ret; |
1839 | } | 1859 | } |
1840 | 1860 | ||
@@ -2165,6 +2185,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2165 | if (!intel_irqs_enabled(dev_priv)) | 2185 | if (!intel_irqs_enabled(dev_priv)) |
2166 | return IRQ_NONE; | 2186 | return IRQ_NONE; |
2167 | 2187 | ||
2188 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | ||
2189 | disable_rpm_wakeref_asserts(dev_priv); | ||
2190 | |||
2168 | /* We get interrupts on unclaimed registers, so check for this before we | 2191 | /* We get interrupts on unclaimed registers, so check for this before we |
2169 | * do any I915_{READ,WRITE}. */ | 2192 | * do any I915_{READ,WRITE}. */ |
2170 | intel_uncore_check_errors(dev); | 2193 | intel_uncore_check_errors(dev); |
@@ -2223,6 +2246,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2223 | POSTING_READ(SDEIER); | 2246 | POSTING_READ(SDEIER); |
2224 | } | 2247 | } |
2225 | 2248 | ||
2249 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | ||
2250 | enable_rpm_wakeref_asserts(dev_priv); | ||
2251 | |||
2226 | return ret; | 2252 | return ret; |
2227 | } | 2253 | } |
2228 | 2254 | ||
@@ -2255,6 +2281,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2255 | if (!intel_irqs_enabled(dev_priv)) | 2281 | if (!intel_irqs_enabled(dev_priv)) |
2256 | return IRQ_NONE; | 2282 | return IRQ_NONE; |
2257 | 2283 | ||
2284 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | ||
2285 | disable_rpm_wakeref_asserts(dev_priv); | ||
2286 | |||
2258 | if (INTEL_INFO(dev_priv)->gen >= 9) | 2287 | if (INTEL_INFO(dev_priv)->gen >= 9) |
2259 | aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | | 2288 | aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | |
2260 | GEN9_AUX_CHANNEL_D; | 2289 | GEN9_AUX_CHANNEL_D; |
@@ -2262,7 +2291,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2262 | master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); | 2291 | master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); |
2263 | master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; | 2292 | master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; |
2264 | if (!master_ctl) | 2293 | if (!master_ctl) |
2265 | return IRQ_NONE; | 2294 | goto out; |
2266 | 2295 | ||
2267 | I915_WRITE_FW(GEN8_MASTER_IRQ, 0); | 2296 | I915_WRITE_FW(GEN8_MASTER_IRQ, 0); |
2268 | 2297 | ||
@@ -2393,6 +2422,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2393 | I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | 2422 | I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
2394 | POSTING_READ_FW(GEN8_MASTER_IRQ); | 2423 | POSTING_READ_FW(GEN8_MASTER_IRQ); |
2395 | 2424 | ||
2425 | out: | ||
2426 | enable_rpm_wakeref_asserts(dev_priv); | ||
2427 | |||
2396 | return ret; | 2428 | return ret; |
2397 | } | 2429 | } |
2398 | 2430 | ||
@@ -2989,6 +3021,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work) | |||
2989 | if (!i915.enable_hangcheck) | 3021 | if (!i915.enable_hangcheck) |
2990 | return; | 3022 | return; |
2991 | 3023 | ||
3024 | /* | ||
3025 | * The hangcheck work is synced during runtime suspend, we don't | ||
3026 | * require a wakeref. TODO: instead of disabling the asserts make | ||
3027 | * sure that we hold a reference when this work is running. | ||
3028 | */ | ||
3029 | DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); | ||
3030 | |||
2992 | for_each_ring(ring, dev_priv, i) { | 3031 | for_each_ring(ring, dev_priv, i) { |
2993 | u64 acthd; | 3032 | u64 acthd; |
2994 | u32 seqno; | 3033 | u32 seqno; |
@@ -3080,13 +3119,18 @@ static void i915_hangcheck_elapsed(struct work_struct *work) | |||
3080 | } | 3119 | } |
3081 | } | 3120 | } |
3082 | 3121 | ||
3083 | if (rings_hung) | 3122 | if (rings_hung) { |
3084 | return i915_handle_error(dev, true, "Ring hung"); | 3123 | i915_handle_error(dev, true, "Ring hung"); |
3124 | goto out; | ||
3125 | } | ||
3085 | 3126 | ||
3086 | if (busy_count) | 3127 | if (busy_count) |
3087 | /* Reset timer case chip hangs without another request | 3128 | /* Reset timer case chip hangs without another request |
3088 | * being added */ | 3129 | * being added */ |
3089 | i915_queue_hangcheck(dev); | 3130 | i915_queue_hangcheck(dev); |
3131 | |||
3132 | out: | ||
3133 | ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); | ||
3090 | } | 3134 | } |
3091 | 3135 | ||
3092 | void i915_queue_hangcheck(struct drm_device *dev) | 3136 | void i915_queue_hangcheck(struct drm_device *dev) |
@@ -3878,13 +3922,18 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) | |||
3878 | u16 flip_mask = | 3922 | u16 flip_mask = |
3879 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 3923 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
3880 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 3924 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
3925 | irqreturn_t ret; | ||
3881 | 3926 | ||
3882 | if (!intel_irqs_enabled(dev_priv)) | 3927 | if (!intel_irqs_enabled(dev_priv)) |
3883 | return IRQ_NONE; | 3928 | return IRQ_NONE; |
3884 | 3929 | ||
3930 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | ||
3931 | disable_rpm_wakeref_asserts(dev_priv); | ||
3932 | |||
3933 | ret = IRQ_NONE; | ||
3885 | iir = I915_READ16(IIR); | 3934 | iir = I915_READ16(IIR); |
3886 | if (iir == 0) | 3935 | if (iir == 0) |
3887 | return IRQ_NONE; | 3936 | goto out; |
3888 | 3937 | ||
3889 | while (iir & ~flip_mask) { | 3938 | while (iir & ~flip_mask) { |
3890 | /* Can't rely on pipestat interrupt bit in iir as it might | 3939 | /* Can't rely on pipestat interrupt bit in iir as it might |
@@ -3933,8 +3982,12 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) | |||
3933 | 3982 | ||
3934 | iir = new_iir; | 3983 | iir = new_iir; |
3935 | } | 3984 | } |
3985 | ret = IRQ_HANDLED; | ||
3986 | |||
3987 | out: | ||
3988 | enable_rpm_wakeref_asserts(dev_priv); | ||
3936 | 3989 | ||
3937 | return IRQ_HANDLED; | 3990 | return ret; |
3938 | } | 3991 | } |
3939 | 3992 | ||
3940 | static void i8xx_irq_uninstall(struct drm_device * dev) | 3993 | static void i8xx_irq_uninstall(struct drm_device * dev) |
@@ -4063,6 +4116,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | |||
4063 | if (!intel_irqs_enabled(dev_priv)) | 4116 | if (!intel_irqs_enabled(dev_priv)) |
4064 | return IRQ_NONE; | 4117 | return IRQ_NONE; |
4065 | 4118 | ||
4119 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | ||
4120 | disable_rpm_wakeref_asserts(dev_priv); | ||
4121 | |||
4066 | iir = I915_READ(IIR); | 4122 | iir = I915_READ(IIR); |
4067 | do { | 4123 | do { |
4068 | bool irq_received = (iir & ~flip_mask) != 0; | 4124 | bool irq_received = (iir & ~flip_mask) != 0; |
@@ -4145,6 +4201,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | |||
4145 | iir = new_iir; | 4201 | iir = new_iir; |
4146 | } while (iir & ~flip_mask); | 4202 | } while (iir & ~flip_mask); |
4147 | 4203 | ||
4204 | enable_rpm_wakeref_asserts(dev_priv); | ||
4205 | |||
4148 | return ret; | 4206 | return ret; |
4149 | } | 4207 | } |
4150 | 4208 | ||
@@ -4284,6 +4342,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
4284 | if (!intel_irqs_enabled(dev_priv)) | 4342 | if (!intel_irqs_enabled(dev_priv)) |
4285 | return IRQ_NONE; | 4343 | return IRQ_NONE; |
4286 | 4344 | ||
4345 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | ||
4346 | disable_rpm_wakeref_asserts(dev_priv); | ||
4347 | |||
4287 | iir = I915_READ(IIR); | 4348 | iir = I915_READ(IIR); |
4288 | 4349 | ||
4289 | for (;;) { | 4350 | for (;;) { |
@@ -4369,6 +4430,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
4369 | iir = new_iir; | 4430 | iir = new_iir; |
4370 | } | 4431 | } |
4371 | 4432 | ||
4433 | enable_rpm_wakeref_asserts(dev_priv); | ||
4434 | |||
4372 | return ret; | 4435 | return ret; |
4373 | } | 4436 | } |
4374 | 4437 | ||
@@ -4412,7 +4475,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
4412 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); | 4475 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
4413 | 4476 | ||
4414 | /* Let's track the enabled rps events */ | 4477 | /* Let's track the enabled rps events */ |
4415 | if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) | 4478 | if (IS_VALLEYVIEW(dev_priv)) |
4416 | /* WaGsvRC0ResidencyMethod:vlv */ | 4479 | /* WaGsvRC0ResidencyMethod:vlv */ |
4417 | dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; | 4480 | dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; |
4418 | else | 4481 | else |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1dae5ac3e0b1..007ae83a4086 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -7327,6 +7327,7 @@ enum skl_disp_power_wells { | |||
7327 | #define SBI_READY (0x0<<0) | 7327 | #define SBI_READY (0x0<<0) |
7328 | 7328 | ||
7329 | /* SBI offsets */ | 7329 | /* SBI offsets */ |
7330 | #define SBI_SSCDIVINTPHASE 0x0200 | ||
7330 | #define SBI_SSCDIVINTPHASE6 0x0600 | 7331 | #define SBI_SSCDIVINTPHASE6 0x0600 |
7331 | #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) | 7332 | #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) |
7332 | #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) | 7333 | #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) |
@@ -7334,6 +7335,7 @@ enum skl_disp_power_wells { | |||
7334 | #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) | 7335 | #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) |
7335 | #define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) | 7336 | #define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) |
7336 | #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) | 7337 | #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) |
7338 | #define SBI_SSCDITHPHASE 0x0204 | ||
7337 | #define SBI_SSCCTL 0x020c | 7339 | #define SBI_SSCCTL 0x020c |
7338 | #define SBI_SSCCTL6 0x060C | 7340 | #define SBI_SSCCTL6 0x060C |
7339 | #define SBI_SSCCTL_PATHALT (1<<3) | 7341 | #define SBI_SSCCTL_PATHALT (1<<3) |
@@ -8100,9 +8102,7 @@ enum skl_disp_power_wells { | |||
8100 | #define RGB_FLIP_TO_BGR (1 << 2) | 8102 | #define RGB_FLIP_TO_BGR (1 << 2) |
8101 | 8103 | ||
8102 | #define BXT_PIPE_SELECT_MASK (7 << 7) | 8104 | #define BXT_PIPE_SELECT_MASK (7 << 7) |
8103 | #define BXT_PIPE_SELECT_C (2 << 7) | 8105 | #define BXT_PIPE_SELECT(pipe) ((pipe) << 7) |
8104 | #define BXT_PIPE_SELECT_B (1 << 7) | ||
8105 | #define BXT_PIPE_SELECT_A (0 << 7) | ||
8106 | 8106 | ||
8107 | #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) | 8107 | #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) |
8108 | #define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) | 8108 | #define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 2d9182189422..a2aa09ce3202 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev) | |||
49 | dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); | 49 | dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); |
50 | dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); | 50 | dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); |
51 | dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); | 51 | dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); |
52 | } else if (!IS_VALLEYVIEW(dev)) { | 52 | } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { |
53 | dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); | 53 | dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); |
54 | dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); | 54 | dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); |
55 | dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); | 55 | dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); |
@@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev) | |||
84 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); | 84 | I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); |
85 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); | 85 | I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); |
86 | I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); | 86 | I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); |
87 | } else if (!IS_VALLEYVIEW(dev)) { | 87 | } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { |
88 | I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); | 88 | I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); |
89 | I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); | 89 | I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); |
90 | I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); | 90 | I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index f929c61f0fa2..37e3f0ddf8e0 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -49,7 +49,7 @@ static u32 calc_residency(struct drm_device *dev, | |||
49 | intel_runtime_pm_get(dev_priv); | 49 | intel_runtime_pm_get(dev_priv); |
50 | 50 | ||
51 | /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ | 51 | /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ |
52 | if (IS_VALLEYVIEW(dev)) { | 52 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
53 | units = 1; | 53 | units = 1; |
54 | div = dev_priv->czclk_freq; | 54 | div = dev_priv->czclk_freq; |
55 | 55 | ||
@@ -284,7 +284,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, | |||
284 | intel_runtime_pm_get(dev_priv); | 284 | intel_runtime_pm_get(dev_priv); |
285 | 285 | ||
286 | mutex_lock(&dev_priv->rps.hw_lock); | 286 | mutex_lock(&dev_priv->rps.hw_lock); |
287 | if (IS_VALLEYVIEW(dev_priv->dev)) { | 287 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
288 | u32 freq; | 288 | u32 freq; |
289 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | 289 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); |
290 | ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); | 290 | ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); |
@@ -598,7 +598,7 @@ void i915_setup_sysfs(struct drm_device *dev) | |||
598 | if (ret) | 598 | if (ret) |
599 | DRM_ERROR("RC6p residency sysfs setup failed\n"); | 599 | DRM_ERROR("RC6p residency sysfs setup failed\n"); |
600 | } | 600 | } |
601 | if (IS_VALLEYVIEW(dev)) { | 601 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
602 | ret = sysfs_merge_group(&dev->primary->kdev->kobj, | 602 | ret = sysfs_merge_group(&dev->primary->kdev->kobj, |
603 | &media_rc6_attr_group); | 603 | &media_rc6_attr_group); |
604 | if (ret) | 604 | if (ret) |
@@ -619,7 +619,7 @@ void i915_setup_sysfs(struct drm_device *dev) | |||
619 | } | 619 | } |
620 | 620 | ||
621 | ret = 0; | 621 | ret = 0; |
622 | if (IS_VALLEYVIEW(dev)) | 622 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
623 | ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs); | 623 | ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs); |
624 | else if (INTEL_INFO(dev)->gen >= 6) | 624 | else if (INTEL_INFO(dev)->gen >= 6) |
625 | ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs); | 625 | ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs); |
@@ -635,7 +635,7 @@ void i915_setup_sysfs(struct drm_device *dev) | |||
635 | void i915_teardown_sysfs(struct drm_device *dev) | 635 | void i915_teardown_sysfs(struct drm_device *dev) |
636 | { | 636 | { |
637 | sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr); | 637 | sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr); |
638 | if (IS_VALLEYVIEW(dev)) | 638 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
639 | sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs); | 639 | sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs); |
640 | else | 640 | else |
641 | sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs); | 641 | sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs); |
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 643f342de33b..d0b1c9afa35e 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c | |||
@@ -95,6 +95,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) | |||
95 | 95 | ||
96 | crtc_state->update_pipe = false; | 96 | crtc_state->update_pipe = false; |
97 | crtc_state->disable_lp_wm = false; | 97 | crtc_state->disable_lp_wm = false; |
98 | crtc_state->disable_cxsr = false; | ||
99 | crtc_state->wm_changed = false; | ||
98 | 100 | ||
99 | return &crtc_state->base; | 101 | return &crtc_state->base; |
100 | } | 102 | } |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 9aa83e71b792..31f6d212fb1b 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -262,7 +262,8 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder) | |||
262 | tmp |= AUD_CONFIG_N_PROG_ENABLE; | 262 | tmp |= AUD_CONFIG_N_PROG_ENABLE; |
263 | tmp &= ~AUD_CONFIG_UPPER_N_MASK; | 263 | tmp &= ~AUD_CONFIG_UPPER_N_MASK; |
264 | tmp &= ~AUD_CONFIG_LOWER_N_MASK; | 264 | tmp &= ~AUD_CONFIG_LOWER_N_MASK; |
265 | if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) | 265 | if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) || |
266 | intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST)) | ||
266 | tmp |= AUD_CONFIG_N_VALUE_INDEX; | 267 | tmp |= AUD_CONFIG_N_VALUE_INDEX; |
267 | I915_WRITE(HSW_AUD_CFG(pipe), tmp); | 268 | I915_WRITE(HSW_AUD_CFG(pipe), tmp); |
268 | 269 | ||
@@ -375,7 +376,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) | |||
375 | if (HAS_PCH_IBX(dev_priv->dev)) { | 376 | if (HAS_PCH_IBX(dev_priv->dev)) { |
376 | aud_config = IBX_AUD_CFG(pipe); | 377 | aud_config = IBX_AUD_CFG(pipe); |
377 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; | 378 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; |
378 | } else if (IS_VALLEYVIEW(dev_priv)) { | 379 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
379 | aud_config = VLV_AUD_CFG(pipe); | 380 | aud_config = VLV_AUD_CFG(pipe); |
380 | aud_cntrl_st2 = VLV_AUD_CNTL_ST2; | 381 | aud_cntrl_st2 = VLV_AUD_CNTL_ST2; |
381 | } else { | 382 | } else { |
@@ -435,7 +436,8 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, | |||
435 | aud_config = IBX_AUD_CFG(pipe); | 436 | aud_config = IBX_AUD_CFG(pipe); |
436 | aud_cntl_st = IBX_AUD_CNTL_ST(pipe); | 437 | aud_cntl_st = IBX_AUD_CNTL_ST(pipe); |
437 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; | 438 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; |
438 | } else if (IS_VALLEYVIEW(connector->dev)) { | 439 | } else if (IS_VALLEYVIEW(connector->dev) || |
440 | IS_CHERRYVIEW(connector->dev)) { | ||
439 | hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe); | 441 | hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe); |
440 | aud_config = VLV_AUD_CFG(pipe); | 442 | aud_config = VLV_AUD_CFG(pipe); |
441 | aud_cntl_st = VLV_AUD_CNTL_ST(pipe); | 443 | aud_cntl_st = VLV_AUD_CNTL_ST(pipe); |
@@ -474,7 +476,8 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, | |||
474 | tmp &= ~AUD_CONFIG_N_VALUE_INDEX; | 476 | tmp &= ~AUD_CONFIG_N_VALUE_INDEX; |
475 | tmp &= ~AUD_CONFIG_N_PROG_ENABLE; | 477 | tmp &= ~AUD_CONFIG_N_PROG_ENABLE; |
476 | tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; | 478 | tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; |
477 | if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) | 479 | if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) || |
480 | intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST)) | ||
478 | tmp |= AUD_CONFIG_N_VALUE_INDEX; | 481 | tmp |= AUD_CONFIG_N_VALUE_INDEX; |
479 | else | 482 | else |
480 | tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); | 483 | tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); |
@@ -512,7 +515,8 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) | |||
512 | 515 | ||
513 | /* ELD Conn_Type */ | 516 | /* ELD Conn_Type */ |
514 | connector->eld[5] &= ~(3 << 2); | 517 | connector->eld[5] &= ~(3 << 2); |
515 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | 518 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
519 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST)) | ||
516 | connector->eld[5] |= (1 << 2); | 520 | connector->eld[5] |= (1 << 2); |
517 | 521 | ||
518 | connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; | 522 | connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; |
@@ -521,6 +525,10 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) | |||
521 | dev_priv->display.audio_codec_enable(connector, intel_encoder, | 525 | dev_priv->display.audio_codec_enable(connector, intel_encoder, |
522 | adjusted_mode); | 526 | adjusted_mode); |
523 | 527 | ||
528 | mutex_lock(&dev_priv->av_mutex); | ||
529 | intel_dig_port->audio_connector = connector; | ||
530 | mutex_unlock(&dev_priv->av_mutex); | ||
531 | |||
524 | if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) | 532 | if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) |
525 | acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); | 533 | acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); |
526 | } | 534 | } |
@@ -544,6 +552,10 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder) | |||
544 | if (dev_priv->display.audio_codec_disable) | 552 | if (dev_priv->display.audio_codec_disable) |
545 | dev_priv->display.audio_codec_disable(intel_encoder); | 553 | dev_priv->display.audio_codec_disable(intel_encoder); |
546 | 554 | ||
555 | mutex_lock(&dev_priv->av_mutex); | ||
556 | intel_dig_port->audio_connector = NULL; | ||
557 | mutex_unlock(&dev_priv->av_mutex); | ||
558 | |||
547 | if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) | 559 | if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) |
548 | acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); | 560 | acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); |
549 | } | 561 | } |
@@ -559,7 +571,7 @@ void intel_init_audio(struct drm_device *dev) | |||
559 | if (IS_G4X(dev)) { | 571 | if (IS_G4X(dev)) { |
560 | dev_priv->display.audio_codec_enable = g4x_audio_codec_enable; | 572 | dev_priv->display.audio_codec_enable = g4x_audio_codec_enable; |
561 | dev_priv->display.audio_codec_disable = g4x_audio_codec_disable; | 573 | dev_priv->display.audio_codec_disable = g4x_audio_codec_disable; |
562 | } else if (IS_VALLEYVIEW(dev)) { | 574 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
563 | dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; | 575 | dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; |
564 | dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; | 576 | dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; |
565 | } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) { | 577 | } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) { |
@@ -628,15 +640,14 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
628 | int port, int rate) | 640 | int port, int rate) |
629 | { | 641 | { |
630 | struct drm_i915_private *dev_priv = dev_to_i915(dev); | 642 | struct drm_i915_private *dev_priv = dev_to_i915(dev); |
631 | struct drm_device *drm_dev = dev_priv->dev; | ||
632 | struct intel_encoder *intel_encoder; | 643 | struct intel_encoder *intel_encoder; |
633 | struct intel_digital_port *intel_dig_port; | ||
634 | struct intel_crtc *crtc; | 644 | struct intel_crtc *crtc; |
635 | struct drm_display_mode *mode; | 645 | struct drm_display_mode *mode; |
636 | struct i915_audio_component *acomp = dev_priv->audio_component; | 646 | struct i915_audio_component *acomp = dev_priv->audio_component; |
637 | enum pipe pipe = -1; | 647 | enum pipe pipe = INVALID_PIPE; |
638 | u32 tmp; | 648 | u32 tmp; |
639 | int n; | 649 | int n; |
650 | int err = 0; | ||
640 | 651 | ||
641 | /* HSW, BDW, SKL, KBL need this fix */ | 652 | /* HSW, BDW, SKL, KBL need this fix */ |
642 | if (!IS_SKYLAKE(dev_priv) && | 653 | if (!IS_SKYLAKE(dev_priv) && |
@@ -647,26 +658,22 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
647 | 658 | ||
648 | mutex_lock(&dev_priv->av_mutex); | 659 | mutex_lock(&dev_priv->av_mutex); |
649 | /* 1. get the pipe */ | 660 | /* 1. get the pipe */ |
650 | for_each_intel_encoder(drm_dev, intel_encoder) { | 661 | intel_encoder = dev_priv->dig_port_map[port]; |
651 | if (intel_encoder->type != INTEL_OUTPUT_HDMI) | 662 | /* intel_encoder might be NULL for DP MST */ |
652 | continue; | 663 | if (!intel_encoder || !intel_encoder->base.crtc || |
653 | intel_dig_port = enc_to_dig_port(&intel_encoder->base); | 664 | intel_encoder->type != INTEL_OUTPUT_HDMI) { |
654 | if (port == intel_dig_port->port) { | 665 | DRM_DEBUG_KMS("no valid port %c\n", port_name(port)); |
655 | crtc = to_intel_crtc(intel_encoder->base.crtc); | 666 | err = -ENODEV; |
656 | if (!crtc) { | 667 | goto unlock; |
657 | DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__); | ||
658 | continue; | ||
659 | } | ||
660 | pipe = crtc->pipe; | ||
661 | break; | ||
662 | } | ||
663 | } | 668 | } |
664 | 669 | crtc = to_intel_crtc(intel_encoder->base.crtc); | |
670 | pipe = crtc->pipe; | ||
665 | if (pipe == INVALID_PIPE) { | 671 | if (pipe == INVALID_PIPE) { |
666 | DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port)); | 672 | DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port)); |
667 | mutex_unlock(&dev_priv->av_mutex); | 673 | err = -ENODEV; |
668 | return -ENODEV; | 674 | goto unlock; |
669 | } | 675 | } |
676 | |||
670 | DRM_DEBUG_KMS("pipe %c connects port %c\n", | 677 | DRM_DEBUG_KMS("pipe %c connects port %c\n", |
671 | pipe_name(pipe), port_name(port)); | 678 | pipe_name(pipe), port_name(port)); |
672 | mode = &crtc->config->base.adjusted_mode; | 679 | mode = &crtc->config->base.adjusted_mode; |
@@ -679,8 +686,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
679 | tmp = I915_READ(HSW_AUD_CFG(pipe)); | 686 | tmp = I915_READ(HSW_AUD_CFG(pipe)); |
680 | tmp &= ~AUD_CONFIG_N_PROG_ENABLE; | 687 | tmp &= ~AUD_CONFIG_N_PROG_ENABLE; |
681 | I915_WRITE(HSW_AUD_CFG(pipe), tmp); | 688 | I915_WRITE(HSW_AUD_CFG(pipe), tmp); |
682 | mutex_unlock(&dev_priv->av_mutex); | 689 | goto unlock; |
683 | return 0; | ||
684 | } | 690 | } |
685 | 691 | ||
686 | n = audio_config_get_n(mode, rate); | 692 | n = audio_config_get_n(mode, rate); |
@@ -690,8 +696,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
690 | tmp = I915_READ(HSW_AUD_CFG(pipe)); | 696 | tmp = I915_READ(HSW_AUD_CFG(pipe)); |
691 | tmp &= ~AUD_CONFIG_N_PROG_ENABLE; | 697 | tmp &= ~AUD_CONFIG_N_PROG_ENABLE; |
692 | I915_WRITE(HSW_AUD_CFG(pipe), tmp); | 698 | I915_WRITE(HSW_AUD_CFG(pipe), tmp); |
693 | mutex_unlock(&dev_priv->av_mutex); | 699 | goto unlock; |
694 | return 0; | ||
695 | } | 700 | } |
696 | 701 | ||
697 | /* 3. set the N/CTS/M */ | 702 | /* 3. set the N/CTS/M */ |
@@ -699,8 +704,37 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
699 | tmp = audio_config_setup_n_reg(n, tmp); | 704 | tmp = audio_config_setup_n_reg(n, tmp); |
700 | I915_WRITE(HSW_AUD_CFG(pipe), tmp); | 705 | I915_WRITE(HSW_AUD_CFG(pipe), tmp); |
701 | 706 | ||
707 | unlock: | ||
702 | mutex_unlock(&dev_priv->av_mutex); | 708 | mutex_unlock(&dev_priv->av_mutex); |
703 | return 0; | 709 | return err; |
710 | } | ||
711 | |||
712 | static int i915_audio_component_get_eld(struct device *dev, int port, | ||
713 | bool *enabled, | ||
714 | unsigned char *buf, int max_bytes) | ||
715 | { | ||
716 | struct drm_i915_private *dev_priv = dev_to_i915(dev); | ||
717 | struct intel_encoder *intel_encoder; | ||
718 | struct intel_digital_port *intel_dig_port; | ||
719 | const u8 *eld; | ||
720 | int ret = -EINVAL; | ||
721 | |||
722 | mutex_lock(&dev_priv->av_mutex); | ||
723 | intel_encoder = dev_priv->dig_port_map[port]; | ||
724 | /* intel_encoder might be NULL for DP MST */ | ||
725 | if (intel_encoder) { | ||
726 | ret = 0; | ||
727 | intel_dig_port = enc_to_dig_port(&intel_encoder->base); | ||
728 | *enabled = intel_dig_port->audio_connector != NULL; | ||
729 | if (*enabled) { | ||
730 | eld = intel_dig_port->audio_connector->eld; | ||
731 | ret = drm_eld_size(eld); | ||
732 | memcpy(buf, eld, min(max_bytes, ret)); | ||
733 | } | ||
734 | } | ||
735 | |||
736 | mutex_unlock(&dev_priv->av_mutex); | ||
737 | return ret; | ||
704 | } | 738 | } |
705 | 739 | ||
706 | static const struct i915_audio_component_ops i915_audio_component_ops = { | 740 | static const struct i915_audio_component_ops i915_audio_component_ops = { |
@@ -710,6 +744,7 @@ static const struct i915_audio_component_ops i915_audio_component_ops = { | |||
710 | .codec_wake_override = i915_audio_component_codec_wake_override, | 744 | .codec_wake_override = i915_audio_component_codec_wake_override, |
711 | .get_cdclk_freq = i915_audio_component_get_cdclk_freq, | 745 | .get_cdclk_freq = i915_audio_component_get_cdclk_freq, |
712 | .sync_audio_rate = i915_audio_component_sync_audio_rate, | 746 | .sync_audio_rate = i915_audio_component_sync_audio_rate, |
747 | .get_eld = i915_audio_component_get_eld, | ||
713 | }; | 748 | }; |
714 | 749 | ||
715 | static int i915_audio_component_bind(struct device *i915_dev, | 750 | static int i915_audio_component_bind(struct device *i915_dev, |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 070470fe9a91..eba3e0f87181 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -24,7 +24,7 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | #include <linux/dmi.h> | 27 | |
28 | #include <drm/drm_dp_helper.h> | 28 | #include <drm/drm_dp_helper.h> |
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include <drm/i915_drm.h> | 30 | #include <drm/i915_drm.h> |
@@ -332,10 +332,10 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, | |||
332 | drm_mode_debug_printmodeline(panel_fixed_mode); | 332 | drm_mode_debug_printmodeline(panel_fixed_mode); |
333 | } | 333 | } |
334 | 334 | ||
335 | static int intel_bios_ssc_frequency(struct drm_device *dev, | 335 | static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv, |
336 | bool alternate) | 336 | bool alternate) |
337 | { | 337 | { |
338 | switch (INTEL_INFO(dev)->gen) { | 338 | switch (INTEL_INFO(dev_priv)->gen) { |
339 | case 2: | 339 | case 2: |
340 | return alternate ? 66667 : 48000; | 340 | return alternate ? 66667 : 48000; |
341 | case 3: | 341 | case 3: |
@@ -350,29 +350,29 @@ static void | |||
350 | parse_general_features(struct drm_i915_private *dev_priv, | 350 | parse_general_features(struct drm_i915_private *dev_priv, |
351 | const struct bdb_header *bdb) | 351 | const struct bdb_header *bdb) |
352 | { | 352 | { |
353 | struct drm_device *dev = dev_priv->dev; | ||
354 | const struct bdb_general_features *general; | 353 | const struct bdb_general_features *general; |
355 | 354 | ||
356 | general = find_section(bdb, BDB_GENERAL_FEATURES); | 355 | general = find_section(bdb, BDB_GENERAL_FEATURES); |
357 | if (general) { | 356 | if (!general) |
358 | dev_priv->vbt.int_tv_support = general->int_tv_support; | 357 | return; |
359 | /* int_crt_support can't be trusted on earlier platforms */ | 358 | |
360 | if (bdb->version >= 155 && | 359 | dev_priv->vbt.int_tv_support = general->int_tv_support; |
361 | (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv))) | 360 | /* int_crt_support can't be trusted on earlier platforms */ |
362 | dev_priv->vbt.int_crt_support = general->int_crt_support; | 361 | if (bdb->version >= 155 && |
363 | dev_priv->vbt.lvds_use_ssc = general->enable_ssc; | 362 | (HAS_DDI(dev_priv) || IS_VALLEYVIEW(dev_priv))) |
364 | dev_priv->vbt.lvds_ssc_freq = | 363 | dev_priv->vbt.int_crt_support = general->int_crt_support; |
365 | intel_bios_ssc_frequency(dev, general->ssc_freq); | 364 | dev_priv->vbt.lvds_use_ssc = general->enable_ssc; |
366 | dev_priv->vbt.display_clock_mode = general->display_clock_mode; | 365 | dev_priv->vbt.lvds_ssc_freq = |
367 | dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; | 366 | intel_bios_ssc_frequency(dev_priv, general->ssc_freq); |
368 | DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", | 367 | dev_priv->vbt.display_clock_mode = general->display_clock_mode; |
369 | dev_priv->vbt.int_tv_support, | 368 | dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; |
370 | dev_priv->vbt.int_crt_support, | 369 | DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", |
371 | dev_priv->vbt.lvds_use_ssc, | 370 | dev_priv->vbt.int_tv_support, |
372 | dev_priv->vbt.lvds_ssc_freq, | 371 | dev_priv->vbt.int_crt_support, |
373 | dev_priv->vbt.display_clock_mode, | 372 | dev_priv->vbt.lvds_use_ssc, |
374 | dev_priv->vbt.fdi_rx_polarity_inverted); | 373 | dev_priv->vbt.lvds_ssc_freq, |
375 | } | 374 | dev_priv->vbt.display_clock_mode, |
375 | dev_priv->vbt.fdi_rx_polarity_inverted); | ||
376 | } | 376 | } |
377 | 377 | ||
378 | static void | 378 | static void |
@@ -1057,10 +1057,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
1057 | static void parse_ddi_ports(struct drm_i915_private *dev_priv, | 1057 | static void parse_ddi_ports(struct drm_i915_private *dev_priv, |
1058 | const struct bdb_header *bdb) | 1058 | const struct bdb_header *bdb) |
1059 | { | 1059 | { |
1060 | struct drm_device *dev = dev_priv->dev; | ||
1061 | enum port port; | 1060 | enum port port; |
1062 | 1061 | ||
1063 | if (!HAS_DDI(dev)) | 1062 | if (!HAS_DDI(dev_priv)) |
1064 | return; | 1063 | return; |
1065 | 1064 | ||
1066 | if (!dev_priv->vbt.child_dev_num) | 1065 | if (!dev_priv->vbt.child_dev_num) |
@@ -1173,7 +1172,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv, | |||
1173 | static void | 1172 | static void |
1174 | init_vbt_defaults(struct drm_i915_private *dev_priv) | 1173 | init_vbt_defaults(struct drm_i915_private *dev_priv) |
1175 | { | 1174 | { |
1176 | struct drm_device *dev = dev_priv->dev; | ||
1177 | enum port port; | 1175 | enum port port; |
1178 | 1176 | ||
1179 | dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; | 1177 | dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; |
@@ -1198,8 +1196,8 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
1198 | * Core/SandyBridge/IvyBridge use alternative (120MHz) reference | 1196 | * Core/SandyBridge/IvyBridge use alternative (120MHz) reference |
1199 | * clock for LVDS. | 1197 | * clock for LVDS. |
1200 | */ | 1198 | */ |
1201 | dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, | 1199 | dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv, |
1202 | !HAS_PCH_SPLIT(dev)); | 1200 | !HAS_PCH_SPLIT(dev_priv)); |
1203 | DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq); | 1201 | DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq); |
1204 | 1202 | ||
1205 | for (port = PORT_A; port < I915_MAX_PORTS; port++) { | 1203 | for (port = PORT_A; port < I915_MAX_PORTS; port++) { |
@@ -1214,88 +1212,79 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
1214 | } | 1212 | } |
1215 | } | 1213 | } |
1216 | 1214 | ||
1217 | static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) | 1215 | static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) |
1218 | { | 1216 | { |
1219 | DRM_DEBUG_KMS("Falling back to manually reading VBT from " | 1217 | const void *_vbt = vbt; |
1220 | "VBIOS ROM for %s\n", | 1218 | |
1221 | id->ident); | 1219 | return _vbt + vbt->bdb_offset; |
1222 | return 1; | ||
1223 | } | 1220 | } |
1224 | 1221 | ||
1225 | static const struct dmi_system_id intel_no_opregion_vbt[] = { | 1222 | /** |
1226 | { | 1223 | * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT |
1227 | .callback = intel_no_opregion_vbt_callback, | 1224 | * @buf: pointer to a buffer to validate |
1228 | .ident = "ThinkCentre A57", | 1225 | * @size: size of the buffer |
1229 | .matches = { | 1226 | * |
1230 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 1227 | * Returns true on valid VBT. |
1231 | DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), | 1228 | */ |
1232 | }, | 1229 | bool intel_bios_is_valid_vbt(const void *buf, size_t size) |
1233 | }, | ||
1234 | { } | ||
1235 | }; | ||
1236 | |||
1237 | static const struct bdb_header *validate_vbt(const void *base, | ||
1238 | size_t size, | ||
1239 | const void *_vbt, | ||
1240 | const char *source) | ||
1241 | { | 1230 | { |
1242 | size_t offset = _vbt - base; | 1231 | const struct vbt_header *vbt = buf; |
1243 | const struct vbt_header *vbt = _vbt; | ||
1244 | const struct bdb_header *bdb; | 1232 | const struct bdb_header *bdb; |
1245 | 1233 | ||
1246 | if (offset + sizeof(struct vbt_header) > size) { | 1234 | if (!vbt) |
1235 | return false; | ||
1236 | |||
1237 | if (sizeof(struct vbt_header) > size) { | ||
1247 | DRM_DEBUG_DRIVER("VBT header incomplete\n"); | 1238 | DRM_DEBUG_DRIVER("VBT header incomplete\n"); |
1248 | return NULL; | 1239 | return false; |
1249 | } | 1240 | } |
1250 | 1241 | ||
1251 | if (memcmp(vbt->signature, "$VBT", 4)) { | 1242 | if (memcmp(vbt->signature, "$VBT", 4)) { |
1252 | DRM_DEBUG_DRIVER("VBT invalid signature\n"); | 1243 | DRM_DEBUG_DRIVER("VBT invalid signature\n"); |
1253 | return NULL; | 1244 | return false; |
1254 | } | 1245 | } |
1255 | 1246 | ||
1256 | offset += vbt->bdb_offset; | 1247 | if (vbt->bdb_offset + sizeof(struct bdb_header) > size) { |
1257 | if (offset + sizeof(struct bdb_header) > size) { | ||
1258 | DRM_DEBUG_DRIVER("BDB header incomplete\n"); | 1248 | DRM_DEBUG_DRIVER("BDB header incomplete\n"); |
1259 | return NULL; | 1249 | return false; |
1260 | } | 1250 | } |
1261 | 1251 | ||
1262 | bdb = base + offset; | 1252 | bdb = get_bdb_header(vbt); |
1263 | if (offset + bdb->bdb_size > size) { | 1253 | if (vbt->bdb_offset + bdb->bdb_size > size) { |
1264 | DRM_DEBUG_DRIVER("BDB incomplete\n"); | 1254 | DRM_DEBUG_DRIVER("BDB incomplete\n"); |
1265 | return NULL; | 1255 | return false; |
1266 | } | 1256 | } |
1267 | 1257 | ||
1268 | DRM_DEBUG_KMS("Using VBT from %s: %20s\n", | 1258 | return vbt; |
1269 | source, vbt->signature); | ||
1270 | return bdb; | ||
1271 | } | 1259 | } |
1272 | 1260 | ||
1273 | static const struct bdb_header *find_vbt(void __iomem *bios, size_t size) | 1261 | static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) |
1274 | { | 1262 | { |
1275 | const struct bdb_header *bdb = NULL; | ||
1276 | size_t i; | 1263 | size_t i; |
1277 | 1264 | ||
1278 | /* Scour memory looking for the VBT signature. */ | 1265 | /* Scour memory looking for the VBT signature. */ |
1279 | for (i = 0; i + 4 < size; i++) { | 1266 | for (i = 0; i + 4 < size; i++) { |
1280 | if (ioread32(bios + i) == *((const u32 *) "$VBT")) { | 1267 | void *vbt; |
1281 | /* | ||
1282 | * This is the one place where we explicitly discard the | ||
1283 | * address space (__iomem) of the BIOS/VBT. From now on | ||
1284 | * everything is based on 'base', and treated as regular | ||
1285 | * memory. | ||
1286 | */ | ||
1287 | void *_bios = (void __force *) bios; | ||
1288 | 1268 | ||
1289 | bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM"); | 1269 | if (ioread32(bios + i) != *((const u32 *) "$VBT")) |
1290 | break; | 1270 | continue; |
1291 | } | 1271 | |
1272 | /* | ||
1273 | * This is the one place where we explicitly discard the address | ||
1274 | * space (__iomem) of the BIOS/VBT. | ||
1275 | */ | ||
1276 | vbt = (void __force *) bios + i; | ||
1277 | if (intel_bios_is_valid_vbt(vbt, size - i)) | ||
1278 | return vbt; | ||
1279 | |||
1280 | break; | ||
1292 | } | 1281 | } |
1293 | 1282 | ||
1294 | return bdb; | 1283 | return NULL; |
1295 | } | 1284 | } |
1296 | 1285 | ||
1297 | /** | 1286 | /** |
1298 | * intel_parse_bios - find VBT and initialize settings from the BIOS | 1287 | * intel_bios_init - find VBT and initialize settings from the BIOS |
1299 | * @dev: DRM device | 1288 | * @dev: DRM device |
1300 | * | 1289 | * |
1301 | * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers | 1290 | * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers |
@@ -1304,37 +1293,39 @@ static const struct bdb_header *find_vbt(void __iomem *bios, size_t size) | |||
1304 | * Returns 0 on success, nonzero on failure. | 1293 | * Returns 0 on success, nonzero on failure. |
1305 | */ | 1294 | */ |
1306 | int | 1295 | int |
1307 | intel_parse_bios(struct drm_device *dev) | 1296 | intel_bios_init(struct drm_i915_private *dev_priv) |
1308 | { | 1297 | { |
1309 | struct drm_i915_private *dev_priv = dev->dev_private; | 1298 | struct pci_dev *pdev = dev_priv->dev->pdev; |
1310 | struct pci_dev *pdev = dev->pdev; | 1299 | const struct vbt_header *vbt = dev_priv->opregion.vbt; |
1311 | const struct bdb_header *bdb = NULL; | 1300 | const struct bdb_header *bdb; |
1312 | u8 __iomem *bios = NULL; | 1301 | u8 __iomem *bios = NULL; |
1313 | 1302 | ||
1314 | if (HAS_PCH_NOP(dev)) | 1303 | if (HAS_PCH_NOP(dev_priv)) |
1315 | return -ENODEV; | 1304 | return -ENODEV; |
1316 | 1305 | ||
1317 | init_vbt_defaults(dev_priv); | 1306 | init_vbt_defaults(dev_priv); |
1318 | 1307 | ||
1319 | /* XXX Should this validation be moved to intel_opregion.c? */ | 1308 | if (!vbt) { |
1320 | if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) | ||
1321 | bdb = validate_vbt(dev_priv->opregion.header, OPREGION_SIZE, | ||
1322 | dev_priv->opregion.vbt, "OpRegion"); | ||
1323 | |||
1324 | if (bdb == NULL) { | ||
1325 | size_t size; | 1309 | size_t size; |
1326 | 1310 | ||
1327 | bios = pci_map_rom(pdev, &size); | 1311 | bios = pci_map_rom(pdev, &size); |
1328 | if (!bios) | 1312 | if (!bios) |
1329 | return -1; | 1313 | return -1; |
1330 | 1314 | ||
1331 | bdb = find_vbt(bios, size); | 1315 | vbt = find_vbt(bios, size); |
1332 | if (!bdb) { | 1316 | if (!vbt) { |
1333 | pci_unmap_rom(pdev, bios); | 1317 | pci_unmap_rom(pdev, bios); |
1334 | return -1; | 1318 | return -1; |
1335 | } | 1319 | } |
1320 | |||
1321 | DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n"); | ||
1336 | } | 1322 | } |
1337 | 1323 | ||
1324 | bdb = get_bdb_header(vbt); | ||
1325 | |||
1326 | DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n", | ||
1327 | (int)sizeof(vbt->signature), vbt->signature, bdb->version); | ||
1328 | |||
1338 | /* Grab useful general definitions */ | 1329 | /* Grab useful general definitions */ |
1339 | parse_general_features(dev_priv, bdb); | 1330 | parse_general_features(dev_priv, bdb); |
1340 | parse_general_definitions(dev_priv, bdb); | 1331 | parse_general_definitions(dev_priv, bdb); |
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 7ec8c9aefb84..54eac1003a1e 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -28,8 +28,6 @@ | |||
28 | #ifndef _I830_BIOS_H_ | 28 | #ifndef _I830_BIOS_H_ |
29 | #define _I830_BIOS_H_ | 29 | #define _I830_BIOS_H_ |
30 | 30 | ||
31 | #include <drm/drmP.h> | ||
32 | |||
33 | struct vbt_header { | 31 | struct vbt_header { |
34 | u8 signature[20]; /**< Always starts with 'VBT$' */ | 32 | u8 signature[20]; /**< Always starts with 'VBT$' */ |
35 | u16 version; /**< decimal */ | 33 | u16 version; /**< decimal */ |
@@ -588,8 +586,6 @@ struct bdb_psr { | |||
588 | struct psr_table psr_table[16]; | 586 | struct psr_table psr_table[16]; |
589 | } __packed; | 587 | } __packed; |
590 | 588 | ||
591 | int intel_parse_bios(struct drm_device *dev); | ||
592 | |||
593 | /* | 589 | /* |
594 | * Driver<->VBIOS interaction occurs through scratch bits in | 590 | * Driver<->VBIOS interaction occurs through scratch bits in |
595 | * GR18 & SWF*. | 591 | * GR18 & SWF*. |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9285fc1e64ee..9c89df1af036 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -844,7 +844,7 @@ void intel_crt_init(struct drm_device *dev) | |||
844 | crt->adpa_reg = adpa_reg; | 844 | crt->adpa_reg = adpa_reg; |
845 | 845 | ||
846 | crt->base.compute_config = intel_crt_compute_config; | 846 | crt->base.compute_config = intel_crt_compute_config; |
847 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) { | 847 | if (HAS_PCH_SPLIT(dev)) { |
848 | crt->base.disable = pch_disable_crt; | 848 | crt->base.disable = pch_disable_crt; |
849 | crt->base.post_disable = pch_post_disable_crt; | 849 | crt->base.post_disable = pch_post_disable_crt; |
850 | } else { | 850 | } else { |
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 6c6a6695e99c..9bb63a85997a 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
@@ -166,6 +166,14 @@ struct stepping_info { | |||
166 | char substepping; | 166 | char substepping; |
167 | }; | 167 | }; |
168 | 168 | ||
169 | /* | ||
170 | * Kabylake derivated from Skylake H0, so SKL H0 | ||
171 | * is the right firmware for KBL A0 (revid 0). | ||
172 | */ | ||
173 | static const struct stepping_info kbl_stepping_info[] = { | ||
174 | {'H', '0'}, {'I', '0'} | ||
175 | }; | ||
176 | |||
169 | static const struct stepping_info skl_stepping_info[] = { | 177 | static const struct stepping_info skl_stepping_info[] = { |
170 | {'A', '0'}, {'B', '0'}, {'C', '0'}, | 178 | {'A', '0'}, {'B', '0'}, {'C', '0'}, |
171 | {'D', '0'}, {'E', '0'}, {'F', '0'}, | 179 | {'D', '0'}, {'E', '0'}, {'F', '0'}, |
@@ -182,7 +190,10 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de | |||
182 | const struct stepping_info *si; | 190 | const struct stepping_info *si; |
183 | unsigned int size; | 191 | unsigned int size; |
184 | 192 | ||
185 | if (IS_SKYLAKE(dev)) { | 193 | if (IS_KABYLAKE(dev)) { |
194 | size = ARRAY_SIZE(kbl_stepping_info); | ||
195 | si = kbl_stepping_info; | ||
196 | } else if (IS_SKYLAKE(dev)) { | ||
186 | size = ARRAY_SIZE(skl_stepping_info); | 197 | size = ARRAY_SIZE(skl_stepping_info); |
187 | si = skl_stepping_info; | 198 | si = skl_stepping_info; |
188 | } else if (IS_BROXTON(dev)) { | 199 | } else if (IS_BROXTON(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 4afb3103eb96..e6408e5583d7 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -353,10 +353,10 @@ static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, | |||
353 | { | 353 | { |
354 | const struct ddi_buf_trans *ddi_translations; | 354 | const struct ddi_buf_trans *ddi_translations; |
355 | 355 | ||
356 | if (IS_SKL_ULX(dev)) { | 356 | if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { |
357 | ddi_translations = skl_y_ddi_translations_dp; | 357 | ddi_translations = skl_y_ddi_translations_dp; |
358 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); | 358 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); |
359 | } else if (IS_SKL_ULT(dev)) { | 359 | } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) { |
360 | ddi_translations = skl_u_ddi_translations_dp; | 360 | ddi_translations = skl_u_ddi_translations_dp; |
361 | *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); | 361 | *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); |
362 | } else { | 362 | } else { |
@@ -373,7 +373,7 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev, | |||
373 | struct drm_i915_private *dev_priv = dev->dev_private; | 373 | struct drm_i915_private *dev_priv = dev->dev_private; |
374 | const struct ddi_buf_trans *ddi_translations; | 374 | const struct ddi_buf_trans *ddi_translations; |
375 | 375 | ||
376 | if (IS_SKL_ULX(dev)) { | 376 | if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { |
377 | if (dev_priv->edp_low_vswing) { | 377 | if (dev_priv->edp_low_vswing) { |
378 | ddi_translations = skl_y_ddi_translations_edp; | 378 | ddi_translations = skl_y_ddi_translations_edp; |
379 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); | 379 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); |
@@ -381,7 +381,7 @@ static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev, | |||
381 | ddi_translations = skl_y_ddi_translations_dp; | 381 | ddi_translations = skl_y_ddi_translations_dp; |
382 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); | 382 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); |
383 | } | 383 | } |
384 | } else if (IS_SKL_ULT(dev)) { | 384 | } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) { |
385 | if (dev_priv->edp_low_vswing) { | 385 | if (dev_priv->edp_low_vswing) { |
386 | ddi_translations = skl_u_ddi_translations_edp; | 386 | ddi_translations = skl_u_ddi_translations_edp; |
387 | *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); | 387 | *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); |
@@ -408,7 +408,7 @@ skl_get_buf_trans_hdmi(struct drm_device *dev, | |||
408 | { | 408 | { |
409 | const struct ddi_buf_trans *ddi_translations; | 409 | const struct ddi_buf_trans *ddi_translations; |
410 | 410 | ||
411 | if (IS_SKL_ULX(dev)) { | 411 | if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { |
412 | ddi_translations = skl_y_ddi_translations_hdmi; | 412 | ddi_translations = skl_y_ddi_translations_hdmi; |
413 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); | 413 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); |
414 | } else { | 414 | } else { |
@@ -675,15 +675,16 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) | |||
675 | temp = I915_READ(DP_TP_STATUS(PORT_E)); | 675 | temp = I915_READ(DP_TP_STATUS(PORT_E)); |
676 | if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { | 676 | if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { |
677 | DRM_DEBUG_KMS("FDI link training done on step %d\n", i); | 677 | DRM_DEBUG_KMS("FDI link training done on step %d\n", i); |
678 | break; | ||
679 | } | ||
678 | 680 | ||
679 | /* Enable normal pixel sending for FDI */ | 681 | /* |
680 | I915_WRITE(DP_TP_CTL(PORT_E), | 682 | * Leave things enabled even if we failed to train FDI. |
681 | DP_TP_CTL_FDI_AUTOTRAIN | | 683 | * Results in less fireworks from the state checker. |
682 | DP_TP_CTL_LINK_TRAIN_NORMAL | | 684 | */ |
683 | DP_TP_CTL_ENHANCED_FRAME_ENABLE | | 685 | if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) { |
684 | DP_TP_CTL_ENABLE); | 686 | DRM_ERROR("FDI link training failed!\n"); |
685 | 687 | break; | |
686 | return; | ||
687 | } | 688 | } |
688 | 689 | ||
689 | temp = I915_READ(DDI_BUF_CTL(PORT_E)); | 690 | temp = I915_READ(DDI_BUF_CTL(PORT_E)); |
@@ -712,7 +713,12 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) | |||
712 | POSTING_READ(FDI_RX_MISC(PIPE_A)); | 713 | POSTING_READ(FDI_RX_MISC(PIPE_A)); |
713 | } | 714 | } |
714 | 715 | ||
715 | DRM_ERROR("FDI link training failed!\n"); | 716 | /* Enable normal pixel sending for FDI */ |
717 | I915_WRITE(DP_TP_CTL(PORT_E), | ||
718 | DP_TP_CTL_FDI_AUTOTRAIN | | ||
719 | DP_TP_CTL_LINK_TRAIN_NORMAL | | ||
720 | DP_TP_CTL_ENHANCED_FRAME_ENABLE | | ||
721 | DP_TP_CTL_ENABLE); | ||
716 | } | 722 | } |
717 | 723 | ||
718 | void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) | 724 | void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) |
@@ -3108,6 +3114,19 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) | |||
3108 | I915_WRITE(FDI_RX_CTL(PIPE_A), val); | 3114 | I915_WRITE(FDI_RX_CTL(PIPE_A), val); |
3109 | } | 3115 | } |
3110 | 3116 | ||
3117 | bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, | ||
3118 | struct intel_crtc *intel_crtc) | ||
3119 | { | ||
3120 | u32 temp; | ||
3121 | |||
3122 | if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { | ||
3123 | temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); | ||
3124 | if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) | ||
3125 | return true; | ||
3126 | } | ||
3127 | return false; | ||
3128 | } | ||
3129 | |||
3111 | void intel_ddi_get_config(struct intel_encoder *encoder, | 3130 | void intel_ddi_get_config(struct intel_encoder *encoder, |
3112 | struct intel_crtc_state *pipe_config) | 3131 | struct intel_crtc_state *pipe_config) |
3113 | { | 3132 | { |
@@ -3168,11 +3187,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
3168 | break; | 3187 | break; |
3169 | } | 3188 | } |
3170 | 3189 | ||
3171 | if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { | 3190 | pipe_config->has_audio = |
3172 | temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); | 3191 | intel_ddi_is_audio_enabled(dev_priv, intel_crtc); |
3173 | if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) | ||
3174 | pipe_config->has_audio = true; | ||
3175 | } | ||
3176 | 3192 | ||
3177 | if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && | 3193 | if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && |
3178 | pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { | 3194 | pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { |
@@ -3295,6 +3311,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
3295 | intel_encoder->get_config = intel_ddi_get_config; | 3311 | intel_encoder->get_config = intel_ddi_get_config; |
3296 | 3312 | ||
3297 | intel_dig_port->port = port; | 3313 | intel_dig_port->port = port; |
3314 | dev_priv->dig_port_map[port] = intel_encoder; | ||
3298 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & | 3315 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & |
3299 | (DDI_BUF_PORT_REVERSAL | | 3316 | (DDI_BUF_PORT_REVERSAL | |
3300 | DDI_A_4_LANES); | 3317 | DDI_A_4_LANES); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bda6b9c82e66..abd2d2944022 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -118,6 +118,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc); | |||
118 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); | 118 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); |
119 | static void ironlake_pfit_enable(struct intel_crtc *crtc); | 119 | static void ironlake_pfit_enable(struct intel_crtc *crtc); |
120 | static void intel_modeset_setup_hw_state(struct drm_device *dev); | 120 | static void intel_modeset_setup_hw_state(struct drm_device *dev); |
121 | static void intel_pre_disable_primary(struct drm_crtc *crtc); | ||
121 | 122 | ||
122 | typedef struct { | 123 | typedef struct { |
123 | int min, max; | 124 | int min, max; |
@@ -187,7 +188,7 @@ int intel_hrawclk(struct drm_device *dev) | |||
187 | uint32_t clkcfg; | 188 | uint32_t clkcfg; |
188 | 189 | ||
189 | /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ | 190 | /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ |
190 | if (IS_VALLEYVIEW(dev)) | 191 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
191 | return 200; | 192 | return 200; |
192 | 193 | ||
193 | clkcfg = I915_READ(CLKCFG); | 194 | clkcfg = I915_READ(CLKCFG); |
@@ -215,7 +216,7 @@ int intel_hrawclk(struct drm_device *dev) | |||
215 | 216 | ||
216 | static void intel_update_czclk(struct drm_i915_private *dev_priv) | 217 | static void intel_update_czclk(struct drm_i915_private *dev_priv) |
217 | { | 218 | { |
218 | if (!IS_VALLEYVIEW(dev_priv)) | 219 | if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) |
219 | return; | 220 | return; |
220 | 221 | ||
221 | dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", | 222 | dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", |
@@ -716,11 +717,12 @@ static bool intel_PLL_is_valid(struct drm_device *dev, | |||
716 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) | 717 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
717 | INTELPllInvalid("m1 out of range\n"); | 718 | INTELPllInvalid("m1 out of range\n"); |
718 | 719 | ||
719 | if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) | 720 | if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && |
721 | !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) | ||
720 | if (clock->m1 <= clock->m2) | 722 | if (clock->m1 <= clock->m2) |
721 | INTELPllInvalid("m1 <= m2\n"); | 723 | INTELPllInvalid("m1 <= m2\n"); |
722 | 724 | ||
723 | if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) { | 725 | if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) { |
724 | if (clock->p < limit->p.min || limit->p.max < clock->p) | 726 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
725 | INTELPllInvalid("p out of range\n"); | 727 | INTELPllInvalid("p out of range\n"); |
726 | if (clock->m < limit->m.min || limit->m.max < clock->m) | 728 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
@@ -1305,7 +1307,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, | |||
1305 | I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) | 1307 | I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) |
1306 | panel_pipe = PIPE_B; | 1308 | panel_pipe = PIPE_B; |
1307 | /* XXX: else fix for eDP */ | 1309 | /* XXX: else fix for eDP */ |
1308 | } else if (IS_VALLEYVIEW(dev)) { | 1310 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1309 | /* presumably write lock depends on pipe, not port select */ | 1311 | /* presumably write lock depends on pipe, not port select */ |
1310 | pp_reg = VLV_PIPE_PP_CONTROL(pipe); | 1312 | pp_reg = VLV_PIPE_PP_CONTROL(pipe); |
1311 | panel_pipe = pipe; | 1313 | panel_pipe = pipe; |
@@ -1423,7 +1425,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
1423 | "plane %d assertion failure, should be off on pipe %c but is still active\n", | 1425 | "plane %d assertion failure, should be off on pipe %c but is still active\n", |
1424 | sprite, pipe_name(pipe)); | 1426 | sprite, pipe_name(pipe)); |
1425 | } | 1427 | } |
1426 | } else if (IS_VALLEYVIEW(dev)) { | 1428 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1427 | for_each_sprite(dev_priv, pipe, sprite) { | 1429 | for_each_sprite(dev_priv, pipe, sprite) { |
1428 | u32 val = I915_READ(SPCNTR(pipe, sprite)); | 1430 | u32 val = I915_READ(SPCNTR(pipe, sprite)); |
1429 | I915_STATE_WARN(val & SP_ENABLE, | 1431 | I915_STATE_WARN(val & SP_ENABLE, |
@@ -1606,9 +1608,6 @@ static void vlv_enable_pll(struct intel_crtc *crtc, | |||
1606 | 1608 | ||
1607 | assert_pipe_disabled(dev_priv, crtc->pipe); | 1609 | assert_pipe_disabled(dev_priv, crtc->pipe); |
1608 | 1610 | ||
1609 | /* No really, not for ILK+ */ | ||
1610 | BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); | ||
1611 | |||
1612 | /* PLL is protected by panel, make sure we can write it */ | 1611 | /* PLL is protected by panel, make sure we can write it */ |
1613 | if (IS_MOBILE(dev_priv->dev)) | 1612 | if (IS_MOBILE(dev_priv->dev)) |
1614 | assert_panel_unlocked(dev_priv, crtc->pipe); | 1613 | assert_panel_unlocked(dev_priv, crtc->pipe); |
@@ -1646,8 +1645,6 @@ static void chv_enable_pll(struct intel_crtc *crtc, | |||
1646 | 1645 | ||
1647 | assert_pipe_disabled(dev_priv, crtc->pipe); | 1646 | assert_pipe_disabled(dev_priv, crtc->pipe); |
1648 | 1647 | ||
1649 | BUG_ON(!IS_CHERRYVIEW(dev_priv->dev)); | ||
1650 | |||
1651 | mutex_lock(&dev_priv->sb_lock); | 1648 | mutex_lock(&dev_priv->sb_lock); |
1652 | 1649 | ||
1653 | /* Enable back the 10bit clock to display controller */ | 1650 | /* Enable back the 10bit clock to display controller */ |
@@ -2319,7 +2316,7 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) | |||
2319 | if (INTEL_INFO(dev_priv)->gen >= 9) | 2316 | if (INTEL_INFO(dev_priv)->gen >= 9) |
2320 | return 256 * 1024; | 2317 | return 256 * 1024; |
2321 | else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) || | 2318 | else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) || |
2322 | IS_VALLEYVIEW(dev_priv)) | 2319 | IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
2323 | return 128 * 1024; | 2320 | return 128 * 1024; |
2324 | else if (INTEL_INFO(dev_priv)->gen >= 4) | 2321 | else if (INTEL_INFO(dev_priv)->gen >= 4) |
2325 | return 4 * 1024; | 2322 | return 4 * 1024; |
@@ -2599,6 +2596,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
2599 | struct drm_i915_gem_object *obj; | 2596 | struct drm_i915_gem_object *obj; |
2600 | struct drm_plane *primary = intel_crtc->base.primary; | 2597 | struct drm_plane *primary = intel_crtc->base.primary; |
2601 | struct drm_plane_state *plane_state = primary->state; | 2598 | struct drm_plane_state *plane_state = primary->state; |
2599 | struct drm_crtc_state *crtc_state = intel_crtc->base.state; | ||
2600 | struct intel_plane *intel_plane = to_intel_plane(primary); | ||
2602 | struct drm_framebuffer *fb; | 2601 | struct drm_framebuffer *fb; |
2603 | 2602 | ||
2604 | if (!plane_config->fb) | 2603 | if (!plane_config->fb) |
@@ -2635,6 +2634,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
2635 | } | 2634 | } |
2636 | } | 2635 | } |
2637 | 2636 | ||
2637 | /* | ||
2638 | * We've failed to reconstruct the BIOS FB. Current display state | ||
2639 | * indicates that the primary plane is visible, but has a NULL FB, | ||
2640 | * which will lead to problems later if we don't fix it up. The | ||
2641 | * simplest solution is to just disable the primary plane now and | ||
2642 | * pretend the BIOS never had it enabled. | ||
2643 | */ | ||
2644 | to_intel_plane_state(plane_state)->visible = false; | ||
2645 | crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); | ||
2646 | intel_pre_disable_primary(&intel_crtc->base); | ||
2647 | intel_plane->disable_plane(primary, &intel_crtc->base); | ||
2648 | |||
2638 | return; | 2649 | return; |
2639 | 2650 | ||
2640 | valid_fb: | 2651 | valid_fb: |
@@ -3940,6 +3951,21 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | |||
3940 | return 0; | 3951 | return 0; |
3941 | } | 3952 | } |
3942 | 3953 | ||
3954 | static void lpt_disable_iclkip(struct drm_i915_private *dev_priv) | ||
3955 | { | ||
3956 | u32 temp; | ||
3957 | |||
3958 | I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); | ||
3959 | |||
3960 | mutex_lock(&dev_priv->sb_lock); | ||
3961 | |||
3962 | temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); | ||
3963 | temp |= SBI_SSCCTL_DISABLE; | ||
3964 | intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); | ||
3965 | |||
3966 | mutex_unlock(&dev_priv->sb_lock); | ||
3967 | } | ||
3968 | |||
3943 | /* Program iCLKIP clock to the desired frequency */ | 3969 | /* Program iCLKIP clock to the desired frequency */ |
3944 | static void lpt_program_iclkip(struct drm_crtc *crtc) | 3970 | static void lpt_program_iclkip(struct drm_crtc *crtc) |
3945 | { | 3971 | { |
@@ -3949,18 +3975,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) | |||
3949 | u32 divsel, phaseinc, auxdiv, phasedir = 0; | 3975 | u32 divsel, phaseinc, auxdiv, phasedir = 0; |
3950 | u32 temp; | 3976 | u32 temp; |
3951 | 3977 | ||
3952 | mutex_lock(&dev_priv->sb_lock); | 3978 | lpt_disable_iclkip(dev_priv); |
3953 | |||
3954 | /* It is necessary to ungate the pixclk gate prior to programming | ||
3955 | * the divisors, and gate it back when it is done. | ||
3956 | */ | ||
3957 | I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); | ||
3958 | |||
3959 | /* Disable SSCCTL */ | ||
3960 | intel_sbi_write(dev_priv, SBI_SSCCTL6, | ||
3961 | intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) | | ||
3962 | SBI_SSCCTL_DISABLE, | ||
3963 | SBI_ICLK); | ||
3964 | 3979 | ||
3965 | /* 20MHz is a corner case which is out of range for the 7-bit divisor */ | 3980 | /* 20MHz is a corner case which is out of range for the 7-bit divisor */ |
3966 | if (clock == 20000) { | 3981 | if (clock == 20000) { |
@@ -3978,7 +3993,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) | |||
3978 | u32 iclk_pi_range = 64; | 3993 | u32 iclk_pi_range = 64; |
3979 | u32 desired_divisor, msb_divisor_value, pi_value; | 3994 | u32 desired_divisor, msb_divisor_value, pi_value; |
3980 | 3995 | ||
3981 | desired_divisor = (iclk_virtual_root_freq / clock); | 3996 | desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock); |
3982 | msb_divisor_value = desired_divisor / iclk_pi_range; | 3997 | msb_divisor_value = desired_divisor / iclk_pi_range; |
3983 | pi_value = desired_divisor % iclk_pi_range; | 3998 | pi_value = desired_divisor % iclk_pi_range; |
3984 | 3999 | ||
@@ -4000,6 +4015,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) | |||
4000 | phasedir, | 4015 | phasedir, |
4001 | phaseinc); | 4016 | phaseinc); |
4002 | 4017 | ||
4018 | mutex_lock(&dev_priv->sb_lock); | ||
4019 | |||
4003 | /* Program SSCDIVINTPHASE6 */ | 4020 | /* Program SSCDIVINTPHASE6 */ |
4004 | temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); | 4021 | temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); |
4005 | temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; | 4022 | temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; |
@@ -4021,12 +4038,12 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) | |||
4021 | temp &= ~SBI_SSCCTL_DISABLE; | 4038 | temp &= ~SBI_SSCCTL_DISABLE; |
4022 | intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); | 4039 | intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); |
4023 | 4040 | ||
4041 | mutex_unlock(&dev_priv->sb_lock); | ||
4042 | |||
4024 | /* Wait for initialization time */ | 4043 | /* Wait for initialization time */ |
4025 | udelay(24); | 4044 | udelay(24); |
4026 | 4045 | ||
4027 | I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); | 4046 | I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); |
4028 | |||
4029 | mutex_unlock(&dev_priv->sb_lock); | ||
4030 | } | 4047 | } |
4031 | 4048 | ||
4032 | static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, | 4049 | static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, |
@@ -4709,14 +4726,6 @@ intel_post_enable_primary(struct drm_crtc *crtc) | |||
4709 | int pipe = intel_crtc->pipe; | 4726 | int pipe = intel_crtc->pipe; |
4710 | 4727 | ||
4711 | /* | 4728 | /* |
4712 | * BDW signals flip done immediately if the plane | ||
4713 | * is disabled, even if the plane enable is already | ||
4714 | * armed to occur at the next vblank :( | ||
4715 | */ | ||
4716 | if (IS_BROADWELL(dev)) | ||
4717 | intel_wait_for_vblank(dev, pipe); | ||
4718 | |||
4719 | /* | ||
4720 | * FIXME IPS should be fine as long as one plane is | 4729 | * FIXME IPS should be fine as long as one plane is |
4721 | * enabled, but in practice it seems to have problems | 4730 | * enabled, but in practice it seems to have problems |
4722 | * when going from primary only to sprite only and vice | 4731 | * when going from primary only to sprite only and vice |
@@ -4793,6 +4802,8 @@ intel_pre_disable_primary(struct drm_crtc *crtc) | |||
4793 | static void intel_post_plane_update(struct intel_crtc *crtc) | 4802 | static void intel_post_plane_update(struct intel_crtc *crtc) |
4794 | { | 4803 | { |
4795 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; | 4804 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; |
4805 | struct intel_crtc_state *pipe_config = | ||
4806 | to_intel_crtc_state(crtc->base.state); | ||
4796 | struct drm_device *dev = crtc->base.dev; | 4807 | struct drm_device *dev = crtc->base.dev; |
4797 | 4808 | ||
4798 | if (atomic->wait_vblank) | 4809 | if (atomic->wait_vblank) |
@@ -4800,10 +4811,9 @@ static void intel_post_plane_update(struct intel_crtc *crtc) | |||
4800 | 4811 | ||
4801 | intel_frontbuffer_flip(dev, atomic->fb_bits); | 4812 | intel_frontbuffer_flip(dev, atomic->fb_bits); |
4802 | 4813 | ||
4803 | if (atomic->disable_cxsr) | 4814 | crtc->wm.cxsr_allowed = true; |
4804 | crtc->wm.cxsr_allowed = true; | ||
4805 | 4815 | ||
4806 | if (crtc->atomic.update_wm_post) | 4816 | if (pipe_config->wm_changed && pipe_config->base.active) |
4807 | intel_update_watermarks(&crtc->base); | 4817 | intel_update_watermarks(&crtc->base); |
4808 | 4818 | ||
4809 | if (atomic->update_fbc) | 4819 | if (atomic->update_fbc) |
@@ -4820,6 +4830,8 @@ static void intel_pre_plane_update(struct intel_crtc *crtc) | |||
4820 | struct drm_device *dev = crtc->base.dev; | 4830 | struct drm_device *dev = crtc->base.dev; |
4821 | struct drm_i915_private *dev_priv = dev->dev_private; | 4831 | struct drm_i915_private *dev_priv = dev->dev_private; |
4822 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; | 4832 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; |
4833 | struct intel_crtc_state *pipe_config = | ||
4834 | to_intel_crtc_state(crtc->base.state); | ||
4823 | 4835 | ||
4824 | if (atomic->disable_fbc) | 4836 | if (atomic->disable_fbc) |
4825 | intel_fbc_deactivate(crtc); | 4837 | intel_fbc_deactivate(crtc); |
@@ -4830,10 +4842,13 @@ static void intel_pre_plane_update(struct intel_crtc *crtc) | |||
4830 | if (atomic->pre_disable_primary) | 4842 | if (atomic->pre_disable_primary) |
4831 | intel_pre_disable_primary(&crtc->base); | 4843 | intel_pre_disable_primary(&crtc->base); |
4832 | 4844 | ||
4833 | if (atomic->disable_cxsr) { | 4845 | if (pipe_config->disable_cxsr) { |
4834 | crtc->wm.cxsr_allowed = false; | 4846 | crtc->wm.cxsr_allowed = false; |
4835 | intel_set_memory_cxsr(dev_priv, false); | 4847 | intel_set_memory_cxsr(dev_priv, false); |
4836 | } | 4848 | } |
4849 | |||
4850 | if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed) | ||
4851 | intel_update_watermarks(&crtc->base); | ||
4837 | } | 4852 | } |
4838 | 4853 | ||
4839 | static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) | 4854 | static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) |
@@ -5166,18 +5181,18 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
5166 | if (!intel_crtc->config->has_dsi_encoder) | 5181 | if (!intel_crtc->config->has_dsi_encoder) |
5167 | intel_ddi_disable_pipe_clock(intel_crtc); | 5182 | intel_ddi_disable_pipe_clock(intel_crtc); |
5168 | 5183 | ||
5169 | if (intel_crtc->config->has_pch_encoder) { | ||
5170 | lpt_disable_pch_transcoder(dev_priv); | ||
5171 | intel_ddi_fdi_disable(crtc); | ||
5172 | } | ||
5173 | |||
5174 | for_each_encoder_on_crtc(dev, crtc, encoder) | 5184 | for_each_encoder_on_crtc(dev, crtc, encoder) |
5175 | if (encoder->post_disable) | 5185 | if (encoder->post_disable) |
5176 | encoder->post_disable(encoder); | 5186 | encoder->post_disable(encoder); |
5177 | 5187 | ||
5178 | if (intel_crtc->config->has_pch_encoder) | 5188 | if (intel_crtc->config->has_pch_encoder) { |
5189 | lpt_disable_pch_transcoder(dev_priv); | ||
5190 | lpt_disable_iclkip(dev_priv); | ||
5191 | intel_ddi_fdi_disable(crtc); | ||
5192 | |||
5179 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, | 5193 | intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, |
5180 | true); | 5194 | true); |
5195 | } | ||
5181 | 5196 | ||
5182 | intel_fbc_disable_crtc(intel_crtc); | 5197 | intel_fbc_disable_crtc(intel_crtc); |
5183 | } | 5198 | } |
@@ -5457,7 +5472,7 @@ static void intel_update_cdclk(struct drm_device *dev) | |||
5457 | * BSpec erroneously claims we should aim for 4MHz, but | 5472 | * BSpec erroneously claims we should aim for 4MHz, but |
5458 | * in fact 1MHz is the correct frequency. | 5473 | * in fact 1MHz is the correct frequency. |
5459 | */ | 5474 | */ |
5460 | if (IS_VALLEYVIEW(dev)) { | 5475 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
5461 | /* | 5476 | /* |
5462 | * Program the gmbus_freq based on the cdclk frequency. | 5477 | * Program the gmbus_freq based on the cdclk frequency. |
5463 | * BSpec erroneously claims we should aim for 4MHz, but | 5478 | * BSpec erroneously claims we should aim for 4MHz, but |
@@ -6354,9 +6369,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) | |||
6354 | WARN_ON(intel_crtc->unpin_work); | 6369 | WARN_ON(intel_crtc->unpin_work); |
6355 | 6370 | ||
6356 | intel_pre_disable_primary(crtc); | 6371 | intel_pre_disable_primary(crtc); |
6372 | |||
6373 | intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); | ||
6374 | to_intel_plane_state(crtc->primary->state)->visible = false; | ||
6357 | } | 6375 | } |
6358 | 6376 | ||
6359 | intel_crtc_disable_planes(crtc, crtc->state->plane_mask); | ||
6360 | dev_priv->display.crtc_disable(crtc); | 6377 | dev_priv->display.crtc_disable(crtc); |
6361 | intel_crtc->active = false; | 6378 | intel_crtc->active = false; |
6362 | intel_update_watermarks(crtc); | 6379 | intel_update_watermarks(crtc); |
@@ -7182,7 +7199,7 @@ static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, | |||
7182 | 7199 | ||
7183 | WARN_ON(!crtc_state->base.state); | 7200 | WARN_ON(!crtc_state->base.state); |
7184 | 7201 | ||
7185 | if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) { | 7202 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) { |
7186 | refclk = 100000; | 7203 | refclk = 100000; |
7187 | } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && | 7204 | } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && |
7188 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | 7205 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
@@ -7881,7 +7898,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) | |||
7881 | pipeconf |= PIPECONF_DOUBLE_WIDE; | 7898 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
7882 | 7899 | ||
7883 | /* only g4x and later have fancy bpc/dither controls */ | 7900 | /* only g4x and later have fancy bpc/dither controls */ |
7884 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { | 7901 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
7885 | /* Bspec claims that we can't use dithering for 30bpp pipes. */ | 7902 | /* Bspec claims that we can't use dithering for 30bpp pipes. */ |
7886 | if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) | 7903 | if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) |
7887 | pipeconf |= PIPECONF_DITHER_EN | | 7904 | pipeconf |= PIPECONF_DITHER_EN | |
@@ -7921,7 +7938,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) | |||
7921 | } else | 7938 | } else |
7922 | pipeconf |= PIPECONF_PROGRESSIVE; | 7939 | pipeconf |= PIPECONF_PROGRESSIVE; |
7923 | 7940 | ||
7924 | if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range) | 7941 | if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && |
7942 | intel_crtc->config->limited_color_range) | ||
7925 | pipeconf |= PIPECONF_COLOR_RANGE_SELECT; | 7943 | pipeconf |= PIPECONF_COLOR_RANGE_SELECT; |
7926 | 7944 | ||
7927 | I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); | 7945 | I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); |
@@ -8168,7 +8186,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | |||
8168 | if (!(tmp & PIPECONF_ENABLE)) | 8186 | if (!(tmp & PIPECONF_ENABLE)) |
8169 | return false; | 8187 | return false; |
8170 | 8188 | ||
8171 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { | 8189 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
8172 | switch (tmp & PIPECONF_BPC_MASK) { | 8190 | switch (tmp & PIPECONF_BPC_MASK) { |
8173 | case PIPECONF_6BPC: | 8191 | case PIPECONF_6BPC: |
8174 | pipe_config->pipe_bpp = 18; | 8192 | pipe_config->pipe_bpp = 18; |
@@ -8184,7 +8202,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | |||
8184 | } | 8202 | } |
8185 | } | 8203 | } |
8186 | 8204 | ||
8187 | if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT)) | 8205 | if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && |
8206 | (tmp & PIPECONF_COLOR_RANGE_SELECT)) | ||
8188 | pipe_config->limited_color_range = true; | 8207 | pipe_config->limited_color_range = true; |
8189 | 8208 | ||
8190 | if (INTEL_INFO(dev)->gen < 4) | 8209 | if (INTEL_INFO(dev)->gen < 4) |
@@ -8212,7 +8231,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | |||
8212 | pipe_config->pixel_multiplier = 1; | 8231 | pipe_config->pixel_multiplier = 1; |
8213 | } | 8232 | } |
8214 | pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); | 8233 | pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); |
8215 | if (!IS_VALLEYVIEW(dev)) { | 8234 | if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { |
8216 | /* | 8235 | /* |
8217 | * DPLL_DVO_2X_MODE must be enabled for both DPLLs | 8236 | * DPLL_DVO_2X_MODE must be enabled for both DPLLs |
8218 | * on 830. Filter it out here so that we don't | 8237 | * on 830. Filter it out here so that we don't |
@@ -8564,6 +8583,67 @@ static void lpt_disable_clkout_dp(struct drm_device *dev) | |||
8564 | mutex_unlock(&dev_priv->sb_lock); | 8583 | mutex_unlock(&dev_priv->sb_lock); |
8565 | } | 8584 | } |
8566 | 8585 | ||
8586 | #define BEND_IDX(steps) ((50 + (steps)) / 5) | ||
8587 | |||
8588 | static const uint16_t sscdivintphase[] = { | ||
8589 | [BEND_IDX( 50)] = 0x3B23, | ||
8590 | [BEND_IDX( 45)] = 0x3B23, | ||
8591 | [BEND_IDX( 40)] = 0x3C23, | ||
8592 | [BEND_IDX( 35)] = 0x3C23, | ||
8593 | [BEND_IDX( 30)] = 0x3D23, | ||
8594 | [BEND_IDX( 25)] = 0x3D23, | ||
8595 | [BEND_IDX( 20)] = 0x3E23, | ||
8596 | [BEND_IDX( 15)] = 0x3E23, | ||
8597 | [BEND_IDX( 10)] = 0x3F23, | ||
8598 | [BEND_IDX( 5)] = 0x3F23, | ||
8599 | [BEND_IDX( 0)] = 0x0025, | ||
8600 | [BEND_IDX( -5)] = 0x0025, | ||
8601 | [BEND_IDX(-10)] = 0x0125, | ||
8602 | [BEND_IDX(-15)] = 0x0125, | ||
8603 | [BEND_IDX(-20)] = 0x0225, | ||
8604 | [BEND_IDX(-25)] = 0x0225, | ||
8605 | [BEND_IDX(-30)] = 0x0325, | ||
8606 | [BEND_IDX(-35)] = 0x0325, | ||
8607 | [BEND_IDX(-40)] = 0x0425, | ||
8608 | [BEND_IDX(-45)] = 0x0425, | ||
8609 | [BEND_IDX(-50)] = 0x0525, | ||
8610 | }; | ||
8611 | |||
8612 | /* | ||
8613 | * Bend CLKOUT_DP | ||
8614 | * steps -50 to 50 inclusive, in steps of 5 | ||
8615 | * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) | ||
8616 | * change in clock period = -(steps / 10) * 5.787 ps | ||
8617 | */ | ||
8618 | static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) | ||
8619 | { | ||
8620 | uint32_t tmp; | ||
8621 | int idx = BEND_IDX(steps); | ||
8622 | |||
8623 | if (WARN_ON(steps % 5 != 0)) | ||
8624 | return; | ||
8625 | |||
8626 | if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) | ||
8627 | return; | ||
8628 | |||
8629 | mutex_lock(&dev_priv->sb_lock); | ||
8630 | |||
8631 | if (steps % 10 != 0) | ||
8632 | tmp = 0xAAAAAAAB; | ||
8633 | else | ||
8634 | tmp = 0x00000000; | ||
8635 | intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); | ||
8636 | |||
8637 | tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); | ||
8638 | tmp &= 0xffff0000; | ||
8639 | tmp |= sscdivintphase[idx]; | ||
8640 | intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); | ||
8641 | |||
8642 | mutex_unlock(&dev_priv->sb_lock); | ||
8643 | } | ||
8644 | |||
8645 | #undef BEND_IDX | ||
8646 | |||
8567 | static void lpt_init_pch_refclk(struct drm_device *dev) | 8647 | static void lpt_init_pch_refclk(struct drm_device *dev) |
8568 | { | 8648 | { |
8569 | struct intel_encoder *encoder; | 8649 | struct intel_encoder *encoder; |
@@ -8579,10 +8659,12 @@ static void lpt_init_pch_refclk(struct drm_device *dev) | |||
8579 | } | 8659 | } |
8580 | } | 8660 | } |
8581 | 8661 | ||
8582 | if (has_vga) | 8662 | if (has_vga) { |
8663 | lpt_bend_clkout_dp(to_i915(dev), 0); | ||
8583 | lpt_enable_clkout_dp(dev, true, true); | 8664 | lpt_enable_clkout_dp(dev, true, true); |
8584 | else | 8665 | } else { |
8585 | lpt_disable_clkout_dp(dev); | 8666 | lpt_disable_clkout_dp(dev); |
8667 | } | ||
8586 | } | 8668 | } |
8587 | 8669 | ||
8588 | /* | 8670 | /* |
@@ -9946,14 +10028,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
9946 | return true; | 10028 | return true; |
9947 | } | 10029 | } |
9948 | 10030 | ||
9949 | static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | 10031 | static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) |
9950 | { | 10032 | { |
9951 | struct drm_device *dev = crtc->dev; | 10033 | struct drm_device *dev = crtc->dev; |
9952 | struct drm_i915_private *dev_priv = dev->dev_private; | 10034 | struct drm_i915_private *dev_priv = dev->dev_private; |
9953 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10035 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9954 | uint32_t cntl = 0, size = 0; | 10036 | uint32_t cntl = 0, size = 0; |
9955 | 10037 | ||
9956 | if (base) { | 10038 | if (on) { |
9957 | unsigned int width = intel_crtc->base.cursor->state->crtc_w; | 10039 | unsigned int width = intel_crtc->base.cursor->state->crtc_w; |
9958 | unsigned int height = intel_crtc->base.cursor->state->crtc_h; | 10040 | unsigned int height = intel_crtc->base.cursor->state->crtc_h; |
9959 | unsigned int stride = roundup_pow_of_two(width) * 4; | 10041 | unsigned int stride = roundup_pow_of_two(width) * 4; |
@@ -10008,16 +10090,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
10008 | } | 10090 | } |
10009 | } | 10091 | } |
10010 | 10092 | ||
10011 | static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | 10093 | static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) |
10012 | { | 10094 | { |
10013 | struct drm_device *dev = crtc->dev; | 10095 | struct drm_device *dev = crtc->dev; |
10014 | struct drm_i915_private *dev_priv = dev->dev_private; | 10096 | struct drm_i915_private *dev_priv = dev->dev_private; |
10015 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10097 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
10016 | int pipe = intel_crtc->pipe; | 10098 | int pipe = intel_crtc->pipe; |
10017 | uint32_t cntl; | 10099 | uint32_t cntl = 0; |
10018 | 10100 | ||
10019 | cntl = 0; | 10101 | if (on) { |
10020 | if (base) { | ||
10021 | cntl = MCURSOR_GAMMA_ENABLE; | 10102 | cntl = MCURSOR_GAMMA_ENABLE; |
10022 | switch (intel_crtc->base.cursor->state->crtc_w) { | 10103 | switch (intel_crtc->base.cursor->state->crtc_w) { |
10023 | case 64: | 10104 | case 64: |
@@ -10068,18 +10149,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
10068 | int y = cursor_state->crtc_y; | 10149 | int y = cursor_state->crtc_y; |
10069 | u32 base = 0, pos = 0; | 10150 | u32 base = 0, pos = 0; |
10070 | 10151 | ||
10071 | if (on) | 10152 | base = intel_crtc->cursor_addr; |
10072 | base = intel_crtc->cursor_addr; | ||
10073 | 10153 | ||
10074 | if (x >= intel_crtc->config->pipe_src_w) | 10154 | if (x >= intel_crtc->config->pipe_src_w) |
10075 | base = 0; | 10155 | on = false; |
10076 | 10156 | ||
10077 | if (y >= intel_crtc->config->pipe_src_h) | 10157 | if (y >= intel_crtc->config->pipe_src_h) |
10078 | base = 0; | 10158 | on = false; |
10079 | 10159 | ||
10080 | if (x < 0) { | 10160 | if (x < 0) { |
10081 | if (x + cursor_state->crtc_w <= 0) | 10161 | if (x + cursor_state->crtc_w <= 0) |
10082 | base = 0; | 10162 | on = false; |
10083 | 10163 | ||
10084 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | 10164 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; |
10085 | x = -x; | 10165 | x = -x; |
@@ -10088,16 +10168,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
10088 | 10168 | ||
10089 | if (y < 0) { | 10169 | if (y < 0) { |
10090 | if (y + cursor_state->crtc_h <= 0) | 10170 | if (y + cursor_state->crtc_h <= 0) |
10091 | base = 0; | 10171 | on = false; |
10092 | 10172 | ||
10093 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | 10173 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; |
10094 | y = -y; | 10174 | y = -y; |
10095 | } | 10175 | } |
10096 | pos |= y << CURSOR_Y_SHIFT; | 10176 | pos |= y << CURSOR_Y_SHIFT; |
10097 | 10177 | ||
10098 | if (base == 0 && intel_crtc->cursor_base == 0) | ||
10099 | return; | ||
10100 | |||
10101 | I915_WRITE(CURPOS(pipe), pos); | 10178 | I915_WRITE(CURPOS(pipe), pos); |
10102 | 10179 | ||
10103 | /* ILK+ do this automagically */ | 10180 | /* ILK+ do this automagically */ |
@@ -10108,9 +10185,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
10108 | } | 10185 | } |
10109 | 10186 | ||
10110 | if (IS_845G(dev) || IS_I865G(dev)) | 10187 | if (IS_845G(dev) || IS_I865G(dev)) |
10111 | i845_update_cursor(crtc, base); | 10188 | i845_update_cursor(crtc, base, on); |
10112 | else | 10189 | else |
10113 | i9xx_update_cursor(crtc, base); | 10190 | i9xx_update_cursor(crtc, base, on); |
10114 | } | 10191 | } |
10115 | 10192 | ||
10116 | static bool cursor_size_ok(struct drm_device *dev, | 10193 | static bool cursor_size_ok(struct drm_device *dev, |
@@ -11539,7 +11616,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
11539 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) | 11616 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) |
11540 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; | 11617 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; |
11541 | 11618 | ||
11542 | if (IS_VALLEYVIEW(dev)) { | 11619 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
11543 | ring = &dev_priv->ring[BCS]; | 11620 | ring = &dev_priv->ring[BCS]; |
11544 | if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) | 11621 | if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) |
11545 | /* vlv: DISPLAY_FLIP fails to change tiling */ | 11622 | /* vlv: DISPLAY_FLIP fails to change tiling */ |
@@ -11695,9 +11772,14 @@ static bool intel_wm_need_update(struct drm_plane *plane, | |||
11695 | struct intel_plane_state *cur = to_intel_plane_state(plane->state); | 11772 | struct intel_plane_state *cur = to_intel_plane_state(plane->state); |
11696 | 11773 | ||
11697 | /* Update watermarks on tiling or size changes. */ | 11774 | /* Update watermarks on tiling or size changes. */ |
11698 | if (!plane->state->fb || !state->fb || | 11775 | if (new->visible != cur->visible) |
11699 | plane->state->fb->modifier[0] != state->fb->modifier[0] || | 11776 | return true; |
11700 | plane->state->rotation != state->rotation || | 11777 | |
11778 | if (!cur->base.fb || !new->base.fb) | ||
11779 | return false; | ||
11780 | |||
11781 | if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] || | ||
11782 | cur->base.rotation != new->base.rotation || | ||
11701 | drm_rect_width(&new->src) != drm_rect_width(&cur->src) || | 11783 | drm_rect_width(&new->src) != drm_rect_width(&cur->src) || |
11702 | drm_rect_height(&new->src) != drm_rect_height(&cur->src) || | 11784 | drm_rect_height(&new->src) != drm_rect_height(&cur->src) || |
11703 | drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || | 11785 | drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || |
@@ -11720,6 +11802,7 @@ static bool needs_scaling(struct intel_plane_state *state) | |||
11720 | int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | 11802 | int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, |
11721 | struct drm_plane_state *plane_state) | 11803 | struct drm_plane_state *plane_state) |
11722 | { | 11804 | { |
11805 | struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); | ||
11723 | struct drm_crtc *crtc = crtc_state->crtc; | 11806 | struct drm_crtc *crtc = crtc_state->crtc; |
11724 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11807 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
11725 | struct drm_plane *plane = plane_state->plane; | 11808 | struct drm_plane *plane = plane_state->plane; |
@@ -11766,25 +11849,17 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | |||
11766 | plane->base.id, was_visible, visible, | 11849 | plane->base.id, was_visible, visible, |
11767 | turn_off, turn_on, mode_changed); | 11850 | turn_off, turn_on, mode_changed); |
11768 | 11851 | ||
11769 | if (turn_on) { | 11852 | if (turn_on || turn_off) { |
11770 | intel_crtc->atomic.update_wm_pre = true; | 11853 | pipe_config->wm_changed = true; |
11771 | /* must disable cxsr around plane enable/disable */ | 11854 | |
11772 | if (plane->type != DRM_PLANE_TYPE_CURSOR) { | ||
11773 | intel_crtc->atomic.disable_cxsr = true; | ||
11774 | /* to potentially re-enable cxsr */ | ||
11775 | intel_crtc->atomic.wait_vblank = true; | ||
11776 | intel_crtc->atomic.update_wm_post = true; | ||
11777 | } | ||
11778 | } else if (turn_off) { | ||
11779 | intel_crtc->atomic.update_wm_post = true; | ||
11780 | /* must disable cxsr around plane enable/disable */ | 11855 | /* must disable cxsr around plane enable/disable */ |
11781 | if (plane->type != DRM_PLANE_TYPE_CURSOR) { | 11856 | if (plane->type != DRM_PLANE_TYPE_CURSOR) { |
11782 | if (is_crtc_enabled) | 11857 | if (is_crtc_enabled) |
11783 | intel_crtc->atomic.wait_vblank = true; | 11858 | intel_crtc->atomic.wait_vblank = true; |
11784 | intel_crtc->atomic.disable_cxsr = true; | 11859 | pipe_config->disable_cxsr = true; |
11785 | } | 11860 | } |
11786 | } else if (intel_wm_need_update(plane, plane_state)) { | 11861 | } else if (intel_wm_need_update(plane, plane_state)) { |
11787 | intel_crtc->atomic.update_wm_pre = true; | 11862 | pipe_config->wm_changed = true; |
11788 | } | 11863 | } |
11789 | 11864 | ||
11790 | if (visible || was_visible) | 11865 | if (visible || was_visible) |
@@ -11929,7 +12004,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
11929 | } | 12004 | } |
11930 | 12005 | ||
11931 | if (mode_changed && !crtc_state->active) | 12006 | if (mode_changed && !crtc_state->active) |
11932 | intel_crtc->atomic.update_wm_post = true; | 12007 | pipe_config->wm_changed = true; |
11933 | 12008 | ||
11934 | if (mode_changed && crtc_state->enable && | 12009 | if (mode_changed && crtc_state->enable && |
11935 | dev_priv->display.crtc_compute_clock && | 12010 | dev_priv->display.crtc_compute_clock && |
@@ -12020,7 +12095,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, | |||
12020 | struct drm_connector_state *connector_state; | 12095 | struct drm_connector_state *connector_state; |
12021 | int bpp, i; | 12096 | int bpp, i; |
12022 | 12097 | ||
12023 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev))) | 12098 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) |
12024 | bpp = 10*3; | 12099 | bpp = 10*3; |
12025 | else if (INTEL_INFO(dev)->gen >= 5) | 12100 | else if (INTEL_INFO(dev)->gen >= 5) |
12026 | bpp = 12*3; | 12101 | bpp = 12*3; |
@@ -12630,7 +12705,7 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
12630 | PIPE_CONF_CHECK_I(pixel_multiplier); | 12705 | PIPE_CONF_CHECK_I(pixel_multiplier); |
12631 | PIPE_CONF_CHECK_I(has_hdmi_sink); | 12706 | PIPE_CONF_CHECK_I(has_hdmi_sink); |
12632 | if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || | 12707 | if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || |
12633 | IS_VALLEYVIEW(dev)) | 12708 | IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
12634 | PIPE_CONF_CHECK_I(limited_color_range); | 12709 | PIPE_CONF_CHECK_I(limited_color_range); |
12635 | PIPE_CONF_CHECK_I(has_infoframe); | 12710 | PIPE_CONF_CHECK_I(has_infoframe); |
12636 | 12711 | ||
@@ -13416,6 +13491,9 @@ static int intel_atomic_commit(struct drm_device *dev, | |||
13416 | */ | 13491 | */ |
13417 | intel_check_cpu_fifo_underruns(dev_priv); | 13492 | intel_check_cpu_fifo_underruns(dev_priv); |
13418 | intel_check_pch_fifo_underruns(dev_priv); | 13493 | intel_check_pch_fifo_underruns(dev_priv); |
13494 | |||
13495 | if (!crtc->state->active) | ||
13496 | intel_update_watermarks(crtc); | ||
13419 | } | 13497 | } |
13420 | } | 13498 | } |
13421 | 13499 | ||
@@ -13851,9 +13929,6 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, | |||
13851 | to_intel_crtc_state(old_crtc_state); | 13929 | to_intel_crtc_state(old_crtc_state); |
13852 | bool modeset = needs_modeset(crtc->state); | 13930 | bool modeset = needs_modeset(crtc->state); |
13853 | 13931 | ||
13854 | if (intel_crtc->atomic.update_wm_pre) | ||
13855 | intel_update_watermarks(crtc); | ||
13856 | |||
13857 | /* Perform vblank evasion around commit operation */ | 13932 | /* Perform vblank evasion around commit operation */ |
13858 | intel_pipe_update_start(intel_crtc); | 13933 | intel_pipe_update_start(intel_crtc); |
13859 | 13934 | ||
@@ -14040,9 +14115,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
14040 | crtc = crtc ? crtc : plane->crtc; | 14115 | crtc = crtc ? crtc : plane->crtc; |
14041 | intel_crtc = to_intel_crtc(crtc); | 14116 | intel_crtc = to_intel_crtc(crtc); |
14042 | 14117 | ||
14043 | if (intel_crtc->cursor_bo == obj) | ||
14044 | goto update; | ||
14045 | |||
14046 | if (!obj) | 14118 | if (!obj) |
14047 | addr = 0; | 14119 | addr = 0; |
14048 | else if (!INTEL_INFO(dev)->cursor_needs_physical) | 14120 | else if (!INTEL_INFO(dev)->cursor_needs_physical) |
@@ -14051,9 +14123,7 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
14051 | addr = obj->phys_handle->busaddr; | 14123 | addr = obj->phys_handle->busaddr; |
14052 | 14124 | ||
14053 | intel_crtc->cursor_addr = addr; | 14125 | intel_crtc->cursor_addr = addr; |
14054 | intel_crtc->cursor_bo = obj; | ||
14055 | 14126 | ||
14056 | update: | ||
14057 | intel_crtc_update_cursor(crtc, state->visible); | 14127 | intel_crtc_update_cursor(crtc, state->visible); |
14058 | } | 14128 | } |
14059 | 14129 | ||
@@ -14382,7 +14452,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
14382 | 14452 | ||
14383 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 14453 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
14384 | intel_dp_init(dev, PCH_DP_D, PORT_D); | 14454 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
14385 | } else if (IS_VALLEYVIEW(dev)) { | 14455 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
14386 | /* | 14456 | /* |
14387 | * The DP_DETECTED bit is the latched state of the DDC | 14457 | * The DP_DETECTED bit is the latched state of the DDC |
14388 | * SDA pin at boot. However since eDP doesn't require DDC | 14458 | * SDA pin at boot. However since eDP doesn't require DDC |
@@ -14531,7 +14601,7 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, | |||
14531 | * pixels and 32K bytes." | 14601 | * pixels and 32K bytes." |
14532 | */ | 14602 | */ |
14533 | return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768); | 14603 | return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768); |
14534 | } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) { | 14604 | } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { |
14535 | return 32*1024; | 14605 | return 32*1024; |
14536 | } else if (gen >= 4) { | 14606 | } else if (gen >= 4) { |
14537 | if (fb_modifier == I915_FORMAT_MOD_X_TILED) | 14607 | if (fb_modifier == I915_FORMAT_MOD_X_TILED) |
@@ -14635,7 +14705,8 @@ static int intel_framebuffer_init(struct drm_device *dev, | |||
14635 | } | 14705 | } |
14636 | break; | 14706 | break; |
14637 | case DRM_FORMAT_ABGR8888: | 14707 | case DRM_FORMAT_ABGR8888: |
14638 | if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) { | 14708 | if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && |
14709 | INTEL_INFO(dev)->gen < 9) { | ||
14639 | DRM_DEBUG("unsupported pixel format: %s\n", | 14710 | DRM_DEBUG("unsupported pixel format: %s\n", |
14640 | drm_get_format_name(mode_cmd->pixel_format)); | 14711 | drm_get_format_name(mode_cmd->pixel_format)); |
14641 | return -EINVAL; | 14712 | return -EINVAL; |
@@ -14651,7 +14722,7 @@ static int intel_framebuffer_init(struct drm_device *dev, | |||
14651 | } | 14722 | } |
14652 | break; | 14723 | break; |
14653 | case DRM_FORMAT_ABGR2101010: | 14724 | case DRM_FORMAT_ABGR2101010: |
14654 | if (!IS_VALLEYVIEW(dev)) { | 14725 | if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { |
14655 | DRM_DEBUG("unsupported pixel format: %s\n", | 14726 | DRM_DEBUG("unsupported pixel format: %s\n", |
14656 | drm_get_format_name(mode_cmd->pixel_format)); | 14727 | drm_get_format_name(mode_cmd->pixel_format)); |
14657 | return -EINVAL; | 14728 | return -EINVAL; |
@@ -14779,7 +14850,7 @@ static void intel_init_display(struct drm_device *dev) | |||
14779 | dev_priv->display.crtc_disable = ironlake_crtc_disable; | 14850 | dev_priv->display.crtc_disable = ironlake_crtc_disable; |
14780 | dev_priv->display.update_primary_plane = | 14851 | dev_priv->display.update_primary_plane = |
14781 | ironlake_update_primary_plane; | 14852 | ironlake_update_primary_plane; |
14782 | } else if (IS_VALLEYVIEW(dev)) { | 14853 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
14783 | dev_priv->display.get_pipe_config = i9xx_get_pipe_config; | 14854 | dev_priv->display.get_pipe_config = i9xx_get_pipe_config; |
14784 | dev_priv->display.get_initial_plane_config = | 14855 | dev_priv->display.get_initial_plane_config = |
14785 | i9xx_get_initial_plane_config; | 14856 | i9xx_get_initial_plane_config; |
@@ -14812,7 +14883,7 @@ static void intel_init_display(struct drm_device *dev) | |||
14812 | else if (IS_HASWELL(dev)) | 14883 | else if (IS_HASWELL(dev)) |
14813 | dev_priv->display.get_display_clock_speed = | 14884 | dev_priv->display.get_display_clock_speed = |
14814 | haswell_get_display_clock_speed; | 14885 | haswell_get_display_clock_speed; |
14815 | else if (IS_VALLEYVIEW(dev)) | 14886 | else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
14816 | dev_priv->display.get_display_clock_speed = | 14887 | dev_priv->display.get_display_clock_speed = |
14817 | valleyview_get_display_clock_speed; | 14888 | valleyview_get_display_clock_speed; |
14818 | else if (IS_GEN5(dev)) | 14889 | else if (IS_GEN5(dev)) |
@@ -14870,7 +14941,7 @@ static void intel_init_display(struct drm_device *dev) | |||
14870 | dev_priv->display.modeset_calc_cdclk = | 14941 | dev_priv->display.modeset_calc_cdclk = |
14871 | broadwell_modeset_calc_cdclk; | 14942 | broadwell_modeset_calc_cdclk; |
14872 | } | 14943 | } |
14873 | } else if (IS_VALLEYVIEW(dev)) { | 14944 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
14874 | dev_priv->display.modeset_commit_cdclk = | 14945 | dev_priv->display.modeset_commit_cdclk = |
14875 | valleyview_modeset_commit_cdclk; | 14946 | valleyview_modeset_commit_cdclk; |
14876 | dev_priv->display.modeset_calc_cdclk = | 14947 | dev_priv->display.modeset_calc_cdclk = |
@@ -15644,7 +15715,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev) | |||
15644 | pll->on = false; | 15715 | pll->on = false; |
15645 | } | 15716 | } |
15646 | 15717 | ||
15647 | if (IS_VALLEYVIEW(dev)) | 15718 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
15648 | vlv_wm_get_hw_state(dev); | 15719 | vlv_wm_get_hw_state(dev); |
15649 | else if (IS_GEN9(dev)) | 15720 | else if (IS_GEN9(dev)) |
15650 | skl_wm_get_hw_state(dev); | 15721 | skl_wm_get_hw_state(dev); |
@@ -15767,7 +15838,7 @@ void intel_connector_unregister(struct intel_connector *intel_connector) | |||
15767 | void intel_modeset_cleanup(struct drm_device *dev) | 15838 | void intel_modeset_cleanup(struct drm_device *dev) |
15768 | { | 15839 | { |
15769 | struct drm_i915_private *dev_priv = dev->dev_private; | 15840 | struct drm_i915_private *dev_priv = dev->dev_private; |
15770 | struct drm_connector *connector; | 15841 | struct intel_connector *connector; |
15771 | 15842 | ||
15772 | intel_disable_gt_powersave(dev); | 15843 | intel_disable_gt_powersave(dev); |
15773 | 15844 | ||
@@ -15794,12 +15865,8 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
15794 | flush_scheduled_work(); | 15865 | flush_scheduled_work(); |
15795 | 15866 | ||
15796 | /* destroy the backlight and sysfs files before encoders/connectors */ | 15867 | /* destroy the backlight and sysfs files before encoders/connectors */ |
15797 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 15868 | for_each_intel_connector(dev, connector) |
15798 | struct intel_connector *intel_connector; | 15869 | connector->unregister(connector); |
15799 | |||
15800 | intel_connector = to_intel_connector(connector); | ||
15801 | intel_connector->unregister(intel_connector); | ||
15802 | } | ||
15803 | 15870 | ||
15804 | drm_mode_config_cleanup(dev); | 15871 | drm_mode_config_cleanup(dev); |
15805 | 15872 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0f0573aa1b0d..796e3d313cb9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -389,8 +389,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) | |||
389 | * We don't have power sequencer currently. | 389 | * We don't have power sequencer currently. |
390 | * Pick one that's not used by other ports. | 390 | * Pick one that's not used by other ports. |
391 | */ | 391 | */ |
392 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 392 | for_each_intel_encoder(dev, encoder) { |
393 | base.head) { | ||
394 | struct intel_dp *tmp; | 393 | struct intel_dp *tmp; |
395 | 394 | ||
396 | if (encoder->type != INTEL_OUTPUT_EDP) | 395 | if (encoder->type != INTEL_OUTPUT_EDP) |
@@ -517,7 +516,7 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) | |||
517 | struct drm_device *dev = dev_priv->dev; | 516 | struct drm_device *dev = dev_priv->dev; |
518 | struct intel_encoder *encoder; | 517 | struct intel_encoder *encoder; |
519 | 518 | ||
520 | if (WARN_ON(!IS_VALLEYVIEW(dev))) | 519 | if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))) |
521 | return; | 520 | return; |
522 | 521 | ||
523 | /* | 522 | /* |
@@ -530,7 +529,7 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) | |||
530 | * should use them always. | 529 | * should use them always. |
531 | */ | 530 | */ |
532 | 531 | ||
533 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { | 532 | for_each_intel_encoder(dev, encoder) { |
534 | struct intel_dp *intel_dp; | 533 | struct intel_dp *intel_dp; |
535 | 534 | ||
536 | if (encoder->type != INTEL_OUTPUT_EDP) | 535 | if (encoder->type != INTEL_OUTPUT_EDP) |
@@ -582,7 +581,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, | |||
582 | 581 | ||
583 | pps_lock(intel_dp); | 582 | pps_lock(intel_dp); |
584 | 583 | ||
585 | if (IS_VALLEYVIEW(dev)) { | 584 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
586 | enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); | 585 | enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); |
587 | i915_reg_t pp_ctrl_reg, pp_div_reg; | 586 | i915_reg_t pp_ctrl_reg, pp_div_reg; |
588 | u32 pp_div; | 587 | u32 pp_div; |
@@ -610,7 +609,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) | |||
610 | 609 | ||
611 | lockdep_assert_held(&dev_priv->pps_mutex); | 610 | lockdep_assert_held(&dev_priv->pps_mutex); |
612 | 611 | ||
613 | if (IS_VALLEYVIEW(dev) && | 612 | if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && |
614 | intel_dp->pps_pipe == INVALID_PIPE) | 613 | intel_dp->pps_pipe == INVALID_PIPE) |
615 | return false; | 614 | return false; |
616 | 615 | ||
@@ -624,7 +623,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp) | |||
624 | 623 | ||
625 | lockdep_assert_held(&dev_priv->pps_mutex); | 624 | lockdep_assert_held(&dev_priv->pps_mutex); |
626 | 625 | ||
627 | if (IS_VALLEYVIEW(dev) && | 626 | if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && |
628 | intel_dp->pps_pipe == INVALID_PIPE) | 627 | intel_dp->pps_pipe == INVALID_PIPE) |
629 | return false; | 628 | return false; |
630 | 629 | ||
@@ -915,6 +914,27 @@ done: | |||
915 | /* Unload any bytes sent back from the other side */ | 914 | /* Unload any bytes sent back from the other side */ |
916 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> | 915 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> |
917 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); | 916 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); |
917 | |||
918 | /* | ||
919 | * By BSpec: "Message sizes of 0 or >20 are not allowed." | ||
920 | * We have no idea of what happened so we return -EBUSY so | ||
921 | * drm layer takes care for the necessary retries. | ||
922 | */ | ||
923 | if (recv_bytes == 0 || recv_bytes > 20) { | ||
924 | DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n", | ||
925 | recv_bytes); | ||
926 | /* | ||
927 | * FIXME: This patch was created on top of a series that | ||
928 | * organize the retries at drm level. There EBUSY should | ||
929 | * also take care for 1ms wait before retrying. | ||
930 | * That aux retries re-org is still needed and after that is | ||
931 | * merged we remove this sleep from here. | ||
932 | */ | ||
933 | usleep_range(1000, 1500); | ||
934 | ret = -EBUSY; | ||
935 | goto out; | ||
936 | } | ||
937 | |||
918 | if (recv_bytes > recv_size) | 938 | if (recv_bytes > recv_size) |
919 | recv_bytes = recv_size; | 939 | recv_bytes = recv_size; |
920 | 940 | ||
@@ -1723,7 +1743,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder) | |||
1723 | I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); | 1743 | I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); |
1724 | } else { | 1744 | } else { |
1725 | if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && | 1745 | if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && |
1726 | crtc->config->limited_color_range) | 1746 | !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range) |
1727 | intel_dp->DP |= DP_COLOR_RANGE_16_235; | 1747 | intel_dp->DP |= DP_COLOR_RANGE_16_235; |
1728 | 1748 | ||
1729 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 1749 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
@@ -2418,7 +2438,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, | |||
2418 | pipe_config->base.adjusted_mode.flags |= flags; | 2438 | pipe_config->base.adjusted_mode.flags |= flags; |
2419 | 2439 | ||
2420 | if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && | 2440 | if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && |
2421 | tmp & DP_COLOR_RANGE_16_235) | 2441 | !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235) |
2422 | pipe_config->limited_color_range = true; | 2442 | pipe_config->limited_color_range = true; |
2423 | 2443 | ||
2424 | pipe_config->has_dp_encoder = true; | 2444 | pipe_config->has_dp_encoder = true; |
@@ -2694,7 +2714,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) | |||
2694 | 2714 | ||
2695 | pps_lock(intel_dp); | 2715 | pps_lock(intel_dp); |
2696 | 2716 | ||
2697 | if (IS_VALLEYVIEW(dev)) | 2717 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
2698 | vlv_init_panel_power_sequencer(intel_dp); | 2718 | vlv_init_panel_power_sequencer(intel_dp); |
2699 | 2719 | ||
2700 | /* | 2720 | /* |
@@ -2728,7 +2748,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) | |||
2728 | 2748 | ||
2729 | pps_unlock(intel_dp); | 2749 | pps_unlock(intel_dp); |
2730 | 2750 | ||
2731 | if (IS_VALLEYVIEW(dev)) { | 2751 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
2732 | unsigned int lane_mask = 0x0; | 2752 | unsigned int lane_mask = 0x0; |
2733 | 2753 | ||
2734 | if (IS_CHERRYVIEW(dev)) | 2754 | if (IS_CHERRYVIEW(dev)) |
@@ -2829,8 +2849,7 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, | |||
2829 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) | 2849 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) |
2830 | return; | 2850 | return; |
2831 | 2851 | ||
2832 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 2852 | for_each_intel_encoder(dev, encoder) { |
2833 | base.head) { | ||
2834 | struct intel_dp *intel_dp; | 2853 | struct intel_dp *intel_dp; |
2835 | enum port port; | 2854 | enum port port; |
2836 | 2855 | ||
@@ -3218,7 +3237,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) | |||
3218 | if (dev_priv->edp_low_vswing && port == PORT_A) | 3237 | if (dev_priv->edp_low_vswing && port == PORT_A) |
3219 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; | 3238 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; |
3220 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; | 3239 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; |
3221 | } else if (IS_VALLEYVIEW(dev)) | 3240 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
3222 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; | 3241 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; |
3223 | else if (IS_GEN7(dev) && port == PORT_A) | 3242 | else if (IS_GEN7(dev) && port == PORT_A) |
3224 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; | 3243 | return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; |
@@ -3259,7 +3278,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) | |||
3259 | default: | 3278 | default: |
3260 | return DP_TRAIN_PRE_EMPH_LEVEL_0; | 3279 | return DP_TRAIN_PRE_EMPH_LEVEL_0; |
3261 | } | 3280 | } |
3262 | } else if (IS_VALLEYVIEW(dev)) { | 3281 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
3263 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | 3282 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { |
3264 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: | 3283 | case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: |
3265 | return DP_TRAIN_PRE_EMPH_LEVEL_3; | 3284 | return DP_TRAIN_PRE_EMPH_LEVEL_3; |
@@ -4539,7 +4558,7 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | |||
4539 | return cpt_digital_port_connected(dev_priv, port); | 4558 | return cpt_digital_port_connected(dev_priv, port); |
4540 | else if (IS_BROXTON(dev_priv)) | 4559 | else if (IS_BROXTON(dev_priv)) |
4541 | return bxt_digital_port_connected(dev_priv, port); | 4560 | return bxt_digital_port_connected(dev_priv, port); |
4542 | else if (IS_VALLEYVIEW(dev_priv)) | 4561 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
4543 | return vlv_digital_port_connected(dev_priv, port); | 4562 | return vlv_digital_port_connected(dev_priv, port); |
4544 | else | 4563 | else |
4545 | return g4x_digital_port_connected(dev_priv, port); | 4564 | return g4x_digital_port_connected(dev_priv, port); |
@@ -4933,7 +4952,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder) | |||
4933 | * Read out the current power sequencer assignment, | 4952 | * Read out the current power sequencer assignment, |
4934 | * in case the BIOS did something with it. | 4953 | * in case the BIOS did something with it. |
4935 | */ | 4954 | */ |
4936 | if (IS_VALLEYVIEW(encoder->dev)) | 4955 | if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev)) |
4937 | vlv_initial_power_sequencer_setup(intel_dp); | 4956 | vlv_initial_power_sequencer_setup(intel_dp); |
4938 | 4957 | ||
4939 | intel_edp_panel_vdd_sanitize(intel_dp); | 4958 | intel_edp_panel_vdd_sanitize(intel_dp); |
@@ -5293,7 +5312,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, | |||
5293 | 5312 | ||
5294 | /* Haswell doesn't have any port selection bits for the panel | 5313 | /* Haswell doesn't have any port selection bits for the panel |
5295 | * power sequencer any more. */ | 5314 | * power sequencer any more. */ |
5296 | if (IS_VALLEYVIEW(dev)) { | 5315 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
5297 | port_sel = PANEL_PORT_SELECT_VLV(port); | 5316 | port_sel = PANEL_PORT_SELECT_VLV(port); |
5298 | } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { | 5317 | } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
5299 | if (port == PORT_A) | 5318 | if (port == PORT_A) |
@@ -5405,12 +5424,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) | |||
5405 | 5424 | ||
5406 | val = I915_READ(reg); | 5425 | val = I915_READ(reg); |
5407 | if (index > DRRS_HIGH_RR) { | 5426 | if (index > DRRS_HIGH_RR) { |
5408 | if (IS_VALLEYVIEW(dev)) | 5427 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
5409 | val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; | 5428 | val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; |
5410 | else | 5429 | else |
5411 | val |= PIPECONF_EDP_RR_MODE_SWITCH; | 5430 | val |= PIPECONF_EDP_RR_MODE_SWITCH; |
5412 | } else { | 5431 | } else { |
5413 | if (IS_VALLEYVIEW(dev)) | 5432 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
5414 | val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; | 5433 | val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; |
5415 | else | 5434 | else |
5416 | val &= ~PIPECONF_EDP_RR_MODE_SWITCH; | 5435 | val &= ~PIPECONF_EDP_RR_MODE_SWITCH; |
@@ -5777,7 +5796,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
5777 | } | 5796 | } |
5778 | mutex_unlock(&dev->mode_config.mutex); | 5797 | mutex_unlock(&dev->mode_config.mutex); |
5779 | 5798 | ||
5780 | if (IS_VALLEYVIEW(dev)) { | 5799 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
5781 | intel_dp->edp_notifier.notifier_call = edp_notify_handler; | 5800 | intel_dp->edp_notifier.notifier_call = edp_notify_handler; |
5782 | register_reboot_notifier(&intel_dp->edp_notifier); | 5801 | register_reboot_notifier(&intel_dp->edp_notifier); |
5783 | 5802 | ||
@@ -5825,7 +5844,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
5825 | /* intel_dp vfuncs */ | 5844 | /* intel_dp vfuncs */ |
5826 | if (INTEL_INFO(dev)->gen >= 9) | 5845 | if (INTEL_INFO(dev)->gen >= 9) |
5827 | intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; | 5846 | intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; |
5828 | else if (IS_VALLEYVIEW(dev)) | 5847 | else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
5829 | intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; | 5848 | intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; |
5830 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 5849 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
5831 | intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; | 5850 | intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; |
@@ -5860,8 +5879,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
5860 | intel_encoder->type = INTEL_OUTPUT_EDP; | 5879 | intel_encoder->type = INTEL_OUTPUT_EDP; |
5861 | 5880 | ||
5862 | /* eDP only on port B and/or C on vlv/chv */ | 5881 | /* eDP only on port B and/or C on vlv/chv */ |
5863 | if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) && | 5882 | if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && |
5864 | port != PORT_B && port != PORT_C)) | 5883 | is_edp(intel_dp) && port != PORT_B && port != PORT_C)) |
5865 | return false; | 5884 | return false; |
5866 | 5885 | ||
5867 | DRM_DEBUG_KMS("Adding %s connector on port %c\n", | 5886 | DRM_DEBUG_KMS("Adding %s connector on port %c\n", |
@@ -5912,7 +5931,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
5912 | if (is_edp(intel_dp)) { | 5931 | if (is_edp(intel_dp)) { |
5913 | pps_lock(intel_dp); | 5932 | pps_lock(intel_dp); |
5914 | intel_dp_init_panel_power_timestamps(intel_dp); | 5933 | intel_dp_init_panel_power_timestamps(intel_dp); |
5915 | if (IS_VALLEYVIEW(dev)) | 5934 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
5916 | vlv_initial_power_sequencer_setup(intel_dp); | 5935 | vlv_initial_power_sequencer_setup(intel_dp); |
5917 | else | 5936 | else |
5918 | intel_dp_init_panel_power_sequencer(dev, intel_dp); | 5937 | intel_dp_init_panel_power_sequencer(dev, intel_dp); |
@@ -5988,8 +6007,9 @@ intel_dp_init(struct drm_device *dev, | |||
5988 | intel_encoder = &intel_dig_port->base; | 6007 | intel_encoder = &intel_dig_port->base; |
5989 | encoder = &intel_encoder->base; | 6008 | encoder = &intel_encoder->base; |
5990 | 6009 | ||
5991 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, | 6010 | if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, |
5992 | DRM_MODE_ENCODER_TMDS, NULL); | 6011 | DRM_MODE_ENCODER_TMDS, NULL)) |
6012 | goto err_encoder_init; | ||
5993 | 6013 | ||
5994 | intel_encoder->compute_config = intel_dp_compute_config; | 6014 | intel_encoder->compute_config = intel_dp_compute_config; |
5995 | intel_encoder->disable = intel_disable_dp; | 6015 | intel_encoder->disable = intel_disable_dp; |
@@ -6015,6 +6035,7 @@ intel_dp_init(struct drm_device *dev, | |||
6015 | } | 6035 | } |
6016 | 6036 | ||
6017 | intel_dig_port->port = port; | 6037 | intel_dig_port->port = port; |
6038 | dev_priv->dig_port_map[port] = intel_encoder; | ||
6018 | intel_dig_port->dp.output_reg = output_reg; | 6039 | intel_dig_port->dp.output_reg = output_reg; |
6019 | 6040 | ||
6020 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | 6041 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
@@ -6038,6 +6059,7 @@ intel_dp_init(struct drm_device *dev, | |||
6038 | 6059 | ||
6039 | err_init_connector: | 6060 | err_init_connector: |
6040 | drm_encoder_cleanup(encoder); | 6061 | drm_encoder_cleanup(encoder); |
6062 | err_encoder_init: | ||
6041 | kfree(intel_connector); | 6063 | kfree(intel_connector); |
6042 | err_connector_alloc: | 6064 | err_connector_alloc: |
6043 | kfree(intel_dig_port); | 6065 | kfree(intel_dig_port); |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index e8d369d0a713..e2f515d3816f 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -78,6 +78,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
78 | return false; | 78 | return false; |
79 | } | 79 | } |
80 | 80 | ||
81 | if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port)) | ||
82 | pipe_config->has_audio = true; | ||
81 | mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); | 83 | mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); |
82 | 84 | ||
83 | pipe_config->pbn = mst_pbn; | 85 | pipe_config->pbn = mst_pbn; |
@@ -102,6 +104,11 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder) | |||
102 | struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); | 104 | struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); |
103 | struct intel_digital_port *intel_dig_port = intel_mst->primary; | 105 | struct intel_digital_port *intel_dig_port = intel_mst->primary; |
104 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 106 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
107 | struct drm_device *dev = encoder->base.dev; | ||
108 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
109 | struct drm_crtc *crtc = encoder->base.crtc; | ||
110 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
111 | |||
105 | int ret; | 112 | int ret; |
106 | 113 | ||
107 | DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); | 114 | DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); |
@@ -112,6 +119,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder) | |||
112 | if (ret) { | 119 | if (ret) { |
113 | DRM_ERROR("failed to update payload %d\n", ret); | 120 | DRM_ERROR("failed to update payload %d\n", ret); |
114 | } | 121 | } |
122 | if (intel_crtc->config->has_audio) { | ||
123 | intel_audio_codec_disable(encoder); | ||
124 | intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); | ||
125 | } | ||
115 | } | 126 | } |
116 | 127 | ||
117 | static void intel_mst_post_disable_dp(struct intel_encoder *encoder) | 128 | static void intel_mst_post_disable_dp(struct intel_encoder *encoder) |
@@ -208,6 +219,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder) | |||
208 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 219 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
209 | struct drm_device *dev = intel_dig_port->base.base.dev; | 220 | struct drm_device *dev = intel_dig_port->base.base.dev; |
210 | struct drm_i915_private *dev_priv = dev->dev_private; | 221 | struct drm_i915_private *dev_priv = dev->dev_private; |
222 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
211 | enum port port = intel_dig_port->port; | 223 | enum port port = intel_dig_port->port; |
212 | int ret; | 224 | int ret; |
213 | 225 | ||
@@ -220,6 +232,13 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder) | |||
220 | ret = drm_dp_check_act_status(&intel_dp->mst_mgr); | 232 | ret = drm_dp_check_act_status(&intel_dp->mst_mgr); |
221 | 233 | ||
222 | ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); | 234 | ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); |
235 | |||
236 | if (crtc->config->has_audio) { | ||
237 | DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", | ||
238 | pipe_name(crtc->pipe)); | ||
239 | intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); | ||
240 | intel_audio_codec_enable(encoder); | ||
241 | } | ||
223 | } | 242 | } |
224 | 243 | ||
225 | static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, | 244 | static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, |
@@ -245,6 +264,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, | |||
245 | 264 | ||
246 | pipe_config->has_dp_encoder = true; | 265 | pipe_config->has_dp_encoder = true; |
247 | 266 | ||
267 | pipe_config->has_audio = | ||
268 | intel_ddi_is_audio_enabled(dev_priv, crtc); | ||
269 | |||
248 | temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); | 270 | temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); |
249 | if (temp & TRANS_DDI_PHSYNC) | 271 | if (temp & TRANS_DDI_PHSYNC) |
250 | flags |= DRM_MODE_FLAG_PHSYNC; | 272 | flags |= DRM_MODE_FLAG_PHSYNC; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 50f83d220249..d523ebb2f89d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -365,7 +365,9 @@ struct intel_crtc_state { | |||
365 | #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ | 365 | #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ |
366 | unsigned long quirks; | 366 | unsigned long quirks; |
367 | 367 | ||
368 | bool update_pipe; | 368 | bool update_pipe; /* can a fast modeset be performed? */ |
369 | bool disable_cxsr; | ||
370 | bool wm_changed; /* watermarks are updated */ | ||
369 | 371 | ||
370 | /* Pipe source size (ie. panel fitter input size) | 372 | /* Pipe source size (ie. panel fitter input size) |
371 | * All planes will be positioned inside this space, | 373 | * All planes will be positioned inside this space, |
@@ -531,9 +533,7 @@ struct intel_crtc_atomic_commit { | |||
531 | /* Sleepable operations to perform before commit */ | 533 | /* Sleepable operations to perform before commit */ |
532 | bool disable_fbc; | 534 | bool disable_fbc; |
533 | bool disable_ips; | 535 | bool disable_ips; |
534 | bool disable_cxsr; | ||
535 | bool pre_disable_primary; | 536 | bool pre_disable_primary; |
536 | bool update_wm_pre, update_wm_post; | ||
537 | 537 | ||
538 | /* Sleepable operations to perform after commit */ | 538 | /* Sleepable operations to perform after commit */ |
539 | unsigned fb_bits; | 539 | unsigned fb_bits; |
@@ -568,7 +568,6 @@ struct intel_crtc { | |||
568 | int adjusted_x; | 568 | int adjusted_x; |
569 | int adjusted_y; | 569 | int adjusted_y; |
570 | 570 | ||
571 | struct drm_i915_gem_object *cursor_bo; | ||
572 | uint32_t cursor_addr; | 571 | uint32_t cursor_addr; |
573 | uint32_t cursor_cntl; | 572 | uint32_t cursor_cntl; |
574 | uint32_t cursor_size; | 573 | uint32_t cursor_size; |
@@ -818,6 +817,8 @@ struct intel_digital_port { | |||
818 | struct intel_hdmi hdmi; | 817 | struct intel_hdmi hdmi; |
819 | enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); | 818 | enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); |
820 | bool release_cl2_override; | 819 | bool release_cl2_override; |
820 | /* for communication with audio component; protected by av_mutex */ | ||
821 | const struct drm_connector *audio_connector; | ||
821 | }; | 822 | }; |
822 | 823 | ||
823 | struct intel_dp_mst_encoder { | 824 | struct intel_dp_mst_encoder { |
@@ -1012,6 +1013,8 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); | |||
1012 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); | 1013 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); |
1013 | bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); | 1014 | bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); |
1014 | void intel_ddi_fdi_disable(struct drm_crtc *crtc); | 1015 | void intel_ddi_fdi_disable(struct drm_crtc *crtc); |
1016 | bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, | ||
1017 | struct intel_crtc *intel_crtc); | ||
1015 | void intel_ddi_get_config(struct intel_encoder *encoder, | 1018 | void intel_ddi_get_config(struct intel_encoder *encoder, |
1016 | struct intel_crtc_state *pipe_config); | 1019 | struct intel_crtc_state *pipe_config); |
1017 | struct intel_encoder * | 1020 | struct intel_encoder * |
@@ -1427,6 +1430,87 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, | |||
1427 | enum intel_display_power_domain domain); | 1430 | enum intel_display_power_domain domain); |
1428 | void intel_display_power_put(struct drm_i915_private *dev_priv, | 1431 | void intel_display_power_put(struct drm_i915_private *dev_priv, |
1429 | enum intel_display_power_domain domain); | 1432 | enum intel_display_power_domain domain); |
1433 | |||
1434 | static inline void | ||
1435 | assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv) | ||
1436 | { | ||
1437 | WARN_ONCE(dev_priv->pm.suspended, | ||
1438 | "Device suspended during HW access\n"); | ||
1439 | } | ||
1440 | |||
1441 | static inline void | ||
1442 | assert_rpm_wakelock_held(struct drm_i915_private *dev_priv) | ||
1443 | { | ||
1444 | assert_rpm_device_not_suspended(dev_priv); | ||
1445 | WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count), | ||
1446 | "RPM wakelock ref not held during HW access"); | ||
1447 | } | ||
1448 | |||
1449 | static inline int | ||
1450 | assert_rpm_atomic_begin(struct drm_i915_private *dev_priv) | ||
1451 | { | ||
1452 | int seq = atomic_read(&dev_priv->pm.atomic_seq); | ||
1453 | |||
1454 | assert_rpm_wakelock_held(dev_priv); | ||
1455 | |||
1456 | return seq; | ||
1457 | } | ||
1458 | |||
1459 | static inline void | ||
1460 | assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq) | ||
1461 | { | ||
1462 | WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq, | ||
1463 | "HW access outside of RPM atomic section\n"); | ||
1464 | } | ||
1465 | |||
1466 | /** | ||
1467 | * disable_rpm_wakeref_asserts - disable the RPM assert checks | ||
1468 | * @dev_priv: i915 device instance | ||
1469 | * | ||
1470 | * This function disable asserts that check if we hold an RPM wakelock | ||
1471 | * reference, while keeping the device-not-suspended checks still enabled. | ||
1472 | * It's meant to be used only in special circumstances where our rule about | ||
1473 | * the wakelock refcount wrt. the device power state doesn't hold. According | ||
1474 | * to this rule at any point where we access the HW or want to keep the HW in | ||
1475 | * an active state we must hold an RPM wakelock reference acquired via one of | ||
1476 | * the intel_runtime_pm_get() helpers. Currently there are a few special spots | ||
1477 | * where this rule doesn't hold: the IRQ and suspend/resume handlers, the | ||
1478 | * forcewake release timer, and the GPU RPS and hangcheck works. All other | ||
1479 | * users should avoid using this function. | ||
1480 | * | ||
1481 | * Any calls to this function must have a symmetric call to | ||
1482 | * enable_rpm_wakeref_asserts(). | ||
1483 | */ | ||
1484 | static inline void | ||
1485 | disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) | ||
1486 | { | ||
1487 | atomic_inc(&dev_priv->pm.wakeref_count); | ||
1488 | } | ||
1489 | |||
1490 | /** | ||
1491 | * enable_rpm_wakeref_asserts - re-enable the RPM assert checks | ||
1492 | * @dev_priv: i915 device instance | ||
1493 | * | ||
1494 | * This function re-enables the RPM assert checks after disabling them with | ||
1495 | * disable_rpm_wakeref_asserts. It's meant to be used only in special | ||
1496 | * circumstances otherwise its use should be avoided. | ||
1497 | * | ||
1498 | * Any calls to this function must have a symmetric call to | ||
1499 | * disable_rpm_wakeref_asserts(). | ||
1500 | */ | ||
1501 | static inline void | ||
1502 | enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) | ||
1503 | { | ||
1504 | atomic_dec(&dev_priv->pm.wakeref_count); | ||
1505 | } | ||
1506 | |||
1507 | /* TODO: convert users of these to rely instead on proper RPM refcounting */ | ||
1508 | #define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv) \ | ||
1509 | disable_rpm_wakeref_asserts(dev_priv) | ||
1510 | |||
1511 | #define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv) \ | ||
1512 | enable_rpm_wakeref_asserts(dev_priv) | ||
1513 | |||
1430 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv); | 1514 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv); |
1431 | void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); | 1515 | void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); |
1432 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv); | 1516 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index fff9a66c32a1..44742fa2f616 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -369,7 +369,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) | |||
369 | { | 369 | { |
370 | struct drm_device *dev = encoder->base.dev; | 370 | struct drm_device *dev = encoder->base.dev; |
371 | 371 | ||
372 | if (IS_VALLEYVIEW(dev)) | 372 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
373 | vlv_dsi_device_ready(encoder); | 373 | vlv_dsi_device_ready(encoder); |
374 | else if (IS_BROXTON(dev)) | 374 | else if (IS_BROXTON(dev)) |
375 | bxt_dsi_device_ready(encoder); | 375 | bxt_dsi_device_ready(encoder); |
@@ -487,7 +487,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) | |||
487 | 487 | ||
488 | msleep(intel_dsi->panel_on_delay); | 488 | msleep(intel_dsi->panel_on_delay); |
489 | 489 | ||
490 | if (IS_VALLEYVIEW(dev)) { | 490 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
491 | /* | 491 | /* |
492 | * Disable DPOunit clock gating, can stall pipe | 492 | * Disable DPOunit clock gating, can stall pipe |
493 | * and we need DPLL REFA always enabled | 493 | * and we need DPLL REFA always enabled |
@@ -684,8 +684,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | |||
684 | * Enable bit does not get set. To check whether DSI Port C | 684 | * Enable bit does not get set. To check whether DSI Port C |
685 | * was enabled in BIOS, check the Pipe B enable bit | 685 | * was enabled in BIOS, check the Pipe B enable bit |
686 | */ | 686 | */ |
687 | if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && | 687 | if (IS_VALLEYVIEW(dev) && port == PORT_C) |
688 | (port == PORT_C)) | ||
689 | dpi_enabled = I915_READ(PIPECONF(PIPE_B)) & | 688 | dpi_enabled = I915_READ(PIPECONF(PIPE_B)) & |
690 | PIPECONF_ENABLE; | 689 | PIPECONF_ENABLE; |
691 | 690 | ||
@@ -716,7 +715,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, | |||
716 | 715 | ||
717 | if (IS_BROXTON(encoder->base.dev)) | 716 | if (IS_BROXTON(encoder->base.dev)) |
718 | pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp); | 717 | pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp); |
719 | else if (IS_VALLEYVIEW(encoder->base.dev)) | 718 | else if (IS_VALLEYVIEW(encoder->base.dev) || |
719 | IS_CHERRYVIEW(encoder->base.dev)) | ||
720 | pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); | 720 | pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); |
721 | 721 | ||
722 | if (!pclk) | 722 | if (!pclk) |
@@ -869,7 +869,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) | |||
869 | } | 869 | } |
870 | 870 | ||
871 | for_each_dsi_port(port, intel_dsi->ports) { | 871 | for_each_dsi_port(port, intel_dsi->ports) { |
872 | if (IS_VALLEYVIEW(dev)) { | 872 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
873 | /* | 873 | /* |
874 | * escape clock divider, 20MHz, shared for A and C. | 874 | * escape clock divider, 20MHz, shared for A and C. |
875 | * device ready must be off when doing this! txclkesc? | 875 | * device ready must be off when doing this! txclkesc? |
@@ -885,21 +885,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) | |||
885 | I915_WRITE(MIPI_CTRL(port), tmp | | 885 | I915_WRITE(MIPI_CTRL(port), tmp | |
886 | READ_REQUEST_PRIORITY_HIGH); | 886 | READ_REQUEST_PRIORITY_HIGH); |
887 | } else if (IS_BROXTON(dev)) { | 887 | } else if (IS_BROXTON(dev)) { |
888 | /* | 888 | enum pipe pipe = intel_crtc->pipe; |
889 | * FIXME: | 889 | |
890 | * BXT can connect any PIPE to any MIPI port. | ||
891 | * Select the pipe based on the MIPI port read from | ||
892 | * VBT for now. Pick PIPE A for MIPI port A and C | ||
893 | * for port C. | ||
894 | */ | ||
895 | tmp = I915_READ(MIPI_CTRL(port)); | 890 | tmp = I915_READ(MIPI_CTRL(port)); |
896 | tmp &= ~BXT_PIPE_SELECT_MASK; | 891 | tmp &= ~BXT_PIPE_SELECT_MASK; |
897 | 892 | ||
898 | if (port == PORT_A) | 893 | tmp |= BXT_PIPE_SELECT(pipe); |
899 | tmp |= BXT_PIPE_SELECT_A; | ||
900 | else if (port == PORT_C) | ||
901 | tmp |= BXT_PIPE_SELECT_C; | ||
902 | |||
903 | I915_WRITE(MIPI_CTRL(port), tmp); | 894 | I915_WRITE(MIPI_CTRL(port), tmp); |
904 | } | 895 | } |
905 | 896 | ||
@@ -1129,7 +1120,7 @@ void intel_dsi_init(struct drm_device *dev) | |||
1129 | if (!dev_priv->vbt.has_mipi) | 1120 | if (!dev_priv->vbt.has_mipi) |
1130 | return; | 1121 | return; |
1131 | 1122 | ||
1132 | if (IS_VALLEYVIEW(dev)) { | 1123 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1133 | dev_priv->mipi_mmio_base = VLV_MIPI_BASE; | 1124 | dev_priv->mipi_mmio_base = VLV_MIPI_BASE; |
1134 | } else { | 1125 | } else { |
1135 | DRM_ERROR("Unsupported Mipi device to reg base"); | 1126 | DRM_ERROR("Unsupported Mipi device to reg base"); |
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index cb3cf3986212..fbd2b51810ca 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c | |||
@@ -561,7 +561,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder) | |||
561 | { | 561 | { |
562 | struct drm_device *dev = encoder->base.dev; | 562 | struct drm_device *dev = encoder->base.dev; |
563 | 563 | ||
564 | if (IS_VALLEYVIEW(dev)) | 564 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
565 | vlv_enable_dsi_pll(encoder); | 565 | vlv_enable_dsi_pll(encoder); |
566 | else if (IS_BROXTON(dev)) | 566 | else if (IS_BROXTON(dev)) |
567 | bxt_enable_dsi_pll(encoder); | 567 | bxt_enable_dsi_pll(encoder); |
@@ -571,7 +571,7 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder) | |||
571 | { | 571 | { |
572 | struct drm_device *dev = encoder->base.dev; | 572 | struct drm_device *dev = encoder->base.dev; |
573 | 573 | ||
574 | if (IS_VALLEYVIEW(dev)) | 574 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
575 | vlv_disable_dsi_pll(encoder); | 575 | vlv_disable_dsi_pll(encoder); |
576 | else if (IS_BROXTON(dev)) | 576 | else if (IS_BROXTON(dev)) |
577 | bxt_disable_dsi_pll(encoder); | 577 | bxt_disable_dsi_pll(encoder); |
@@ -599,6 +599,6 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) | |||
599 | 599 | ||
600 | if (IS_BROXTON(dev)) | 600 | if (IS_BROXTON(dev)) |
601 | bxt_dsi_reset_clocks(encoder, port); | 601 | bxt_dsi_reset_clocks(encoder, port); |
602 | else if (IS_VALLEYVIEW(dev)) | 602 | else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
603 | vlv_dsi_reset_clocks(encoder, port); | 603 | vlv_dsi_reset_clocks(encoder, port); |
604 | } | 604 | } |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 7ccde58f8c98..bea75cafc623 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -163,13 +163,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper, | |||
163 | goto out; | 163 | goto out; |
164 | } | 164 | } |
165 | 165 | ||
166 | /* Flush everything out, we'll be doing GTT only from now on */ | ||
167 | ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL); | ||
168 | if (ret) { | ||
169 | DRM_ERROR("failed to pin obj: %d\n", ret); | ||
170 | goto out; | ||
171 | } | ||
172 | |||
173 | mutex_unlock(&dev->struct_mutex); | 166 | mutex_unlock(&dev->struct_mutex); |
174 | 167 | ||
175 | ifbdev->fb = to_intel_framebuffer(fb); | 168 | ifbdev->fb = to_intel_framebuffer(fb); |
@@ -225,6 +218,14 @@ static int intelfb_create(struct drm_fb_helper *helper, | |||
225 | 218 | ||
226 | mutex_lock(&dev->struct_mutex); | 219 | mutex_lock(&dev->struct_mutex); |
227 | 220 | ||
221 | /* Pin the GGTT vma for our access via info->screen_base. | ||
222 | * This also validates that any existing fb inherited from the | ||
223 | * BIOS is suitable for own access. | ||
224 | */ | ||
225 | ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL); | ||
226 | if (ret) | ||
227 | goto out_unlock; | ||
228 | |||
228 | info = drm_fb_helper_alloc_fbi(helper); | 229 | info = drm_fb_helper_alloc_fbi(helper); |
229 | if (IS_ERR(info)) { | 230 | if (IS_ERR(info)) { |
230 | DRM_ERROR("Failed to allocate fb_info\n"); | 231 | DRM_ERROR("Failed to allocate fb_info\n"); |
@@ -287,6 +288,7 @@ out_destroy_fbi: | |||
287 | drm_fb_helper_release_fbi(helper); | 288 | drm_fb_helper_release_fbi(helper); |
288 | out_unpin: | 289 | out_unpin: |
289 | i915_gem_object_ggtt_unpin(obj); | 290 | i915_gem_object_ggtt_unpin(obj); |
291 | out_unlock: | ||
290 | mutex_unlock(&dev->struct_mutex); | 292 | mutex_unlock(&dev->struct_mutex); |
291 | return ret; | 293 | return ret; |
292 | } | 294 | } |
@@ -524,6 +526,10 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { | |||
524 | static void intel_fbdev_destroy(struct drm_device *dev, | 526 | static void intel_fbdev_destroy(struct drm_device *dev, |
525 | struct intel_fbdev *ifbdev) | 527 | struct intel_fbdev *ifbdev) |
526 | { | 528 | { |
529 | /* We rely on the object-free to release the VMA pinning for | ||
530 | * the info->screen_base mmaping. Leaking the VMA is simpler than | ||
531 | * trying to rectify all the possible error paths leading here. | ||
532 | */ | ||
527 | 533 | ||
528 | drm_fb_helper_unregister_fbi(&ifbdev->helper); | 534 | drm_fb_helper_unregister_fbi(&ifbdev->helper); |
529 | drm_fb_helper_release_fbi(&ifbdev->helper); | 535 | drm_fb_helper_release_fbi(&ifbdev->helper); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 00d065fee506..a372cc392510 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -78,7 +78,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) | |||
78 | case HDMI_INFOFRAME_TYPE_VENDOR: | 78 | case HDMI_INFOFRAME_TYPE_VENDOR: |
79 | return VIDEO_DIP_SELECT_VENDOR; | 79 | return VIDEO_DIP_SELECT_VENDOR; |
80 | default: | 80 | default: |
81 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 81 | MISSING_CASE(type); |
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | } | 84 | } |
@@ -93,7 +93,7 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) | |||
93 | case HDMI_INFOFRAME_TYPE_VENDOR: | 93 | case HDMI_INFOFRAME_TYPE_VENDOR: |
94 | return VIDEO_DIP_ENABLE_VENDOR; | 94 | return VIDEO_DIP_ENABLE_VENDOR; |
95 | default: | 95 | default: |
96 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 96 | MISSING_CASE(type); |
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | } | 99 | } |
@@ -108,7 +108,7 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) | |||
108 | case HDMI_INFOFRAME_TYPE_VENDOR: | 108 | case HDMI_INFOFRAME_TYPE_VENDOR: |
109 | return VIDEO_DIP_ENABLE_VS_HSW; | 109 | return VIDEO_DIP_ENABLE_VS_HSW; |
110 | default: | 110 | default: |
111 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 111 | MISSING_CASE(type); |
112 | return 0; | 112 | return 0; |
113 | } | 113 | } |
114 | } | 114 | } |
@@ -127,7 +127,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, | |||
127 | case HDMI_INFOFRAME_TYPE_VENDOR: | 127 | case HDMI_INFOFRAME_TYPE_VENDOR: |
128 | return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); | 128 | return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); |
129 | default: | 129 | default: |
130 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 130 | MISSING_CASE(type); |
131 | return INVALID_MMIO_REG; | 131 | return INVALID_MMIO_REG; |
132 | } | 132 | } |
133 | } | 133 | } |
@@ -375,8 +375,6 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, | |||
375 | u32 val = I915_READ(ctl_reg); | 375 | u32 val = I915_READ(ctl_reg); |
376 | 376 | ||
377 | data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); | 377 | data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); |
378 | if (i915_mmio_reg_valid(data_reg)) | ||
379 | return; | ||
380 | 378 | ||
381 | val &= ~hsw_infoframe_enable(type); | 379 | val &= ~hsw_infoframe_enable(type); |
382 | I915_WRITE(ctl_reg, val); | 380 | I915_WRITE(ctl_reg, val); |
@@ -638,7 +636,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) | |||
638 | 636 | ||
639 | if (HAS_DDI(dev_priv)) | 637 | if (HAS_DDI(dev_priv)) |
640 | reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); | 638 | reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); |
641 | else if (IS_VALLEYVIEW(dev_priv)) | 639 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
642 | reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); | 640 | reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); |
643 | else if (HAS_PCH_SPLIT(dev_priv->dev)) | 641 | else if (HAS_PCH_SPLIT(dev_priv->dev)) |
644 | reg = TVIDEO_DIP_GCP(crtc->pipe); | 642 | reg = TVIDEO_DIP_GCP(crtc->pipe); |
@@ -1397,7 +1395,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
1397 | while (!live_status && --retry) { | 1395 | while (!live_status && --retry) { |
1398 | live_status = intel_digital_port_connected(dev_priv, | 1396 | live_status = intel_digital_port_connected(dev_priv, |
1399 | hdmi_to_dig_port(intel_hdmi)); | 1397 | hdmi_to_dig_port(intel_hdmi)); |
1400 | mdelay(10); | 1398 | msleep(10); |
1401 | } | 1399 | } |
1402 | 1400 | ||
1403 | if (!live_status) | 1401 | if (!live_status) |
@@ -2100,7 +2098,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
2100 | BUG(); | 2098 | BUG(); |
2101 | } | 2099 | } |
2102 | 2100 | ||
2103 | if (IS_VALLEYVIEW(dev)) { | 2101 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
2104 | intel_hdmi->write_infoframe = vlv_write_infoframe; | 2102 | intel_hdmi->write_infoframe = vlv_write_infoframe; |
2105 | intel_hdmi->set_infoframes = vlv_set_infoframes; | 2103 | intel_hdmi->set_infoframes = vlv_set_infoframes; |
2106 | intel_hdmi->infoframe_enabled = vlv_infoframe_enabled; | 2104 | intel_hdmi->infoframe_enabled = vlv_infoframe_enabled; |
@@ -2147,6 +2145,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
2147 | void intel_hdmi_init(struct drm_device *dev, | 2145 | void intel_hdmi_init(struct drm_device *dev, |
2148 | i915_reg_t hdmi_reg, enum port port) | 2146 | i915_reg_t hdmi_reg, enum port port) |
2149 | { | 2147 | { |
2148 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2150 | struct intel_digital_port *intel_dig_port; | 2149 | struct intel_digital_port *intel_dig_port; |
2151 | struct intel_encoder *intel_encoder; | 2150 | struct intel_encoder *intel_encoder; |
2152 | struct intel_connector *intel_connector; | 2151 | struct intel_connector *intel_connector; |
@@ -2215,6 +2214,7 @@ void intel_hdmi_init(struct drm_device *dev, | |||
2215 | intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; | 2214 | intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; |
2216 | 2215 | ||
2217 | intel_dig_port->port = port; | 2216 | intel_dig_port->port = port; |
2217 | dev_priv->dig_port_map[port] = intel_encoder; | ||
2218 | intel_dig_port->hdmi.hdmi_reg = hdmi_reg; | 2218 | intel_dig_port->hdmi.hdmi_reg = hdmi_reg; |
2219 | intel_dig_port->dp.output_reg = INVALID_MMIO_REG; | 2219 | intel_dig_port->dp.output_reg = INVALID_MMIO_REG; |
2220 | 2220 | ||
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index b17785719598..a294a3cbaea1 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
@@ -407,7 +407,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, | |||
407 | * hotplug bits itself. So only WARN about unexpected | 407 | * hotplug bits itself. So only WARN about unexpected |
408 | * interrupts on saner platforms. | 408 | * interrupts on saner platforms. |
409 | */ | 409 | */ |
410 | WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), | 410 | WARN_ONCE(!HAS_GMCH_DISPLAY(dev), |
411 | "Received HPD interrupt on pin %d although disabled\n", i); | 411 | "Received HPD interrupt on pin %d although disabled\n", i); |
412 | continue; | 412 | continue; |
413 | } | 413 | } |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index e26e22a72e3b..25254b5c1ac5 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -636,7 +636,7 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
636 | if (HAS_PCH_NOP(dev)) | 636 | if (HAS_PCH_NOP(dev)) |
637 | return 0; | 637 | return 0; |
638 | 638 | ||
639 | if (IS_VALLEYVIEW(dev)) | 639 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
640 | dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; | 640 | dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; |
641 | else if (!HAS_GMCH_DISPLAY(dev_priv)) | 641 | else if (!HAS_GMCH_DISPLAY(dev_priv)) |
642 | dev_priv->gpio_mmio_base = | 642 | dev_priv->gpio_mmio_base = |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 4ebafab53f30..3aa614731d7e 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -372,7 +372,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) | |||
372 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); | 372 | WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); |
373 | WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); | 373 | WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); |
374 | 374 | ||
375 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); | 375 | page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); |
376 | reg_state = kmap_atomic(page); | 376 | reg_state = kmap_atomic(page); |
377 | 377 | ||
378 | reg_state[CTX_RING_TAIL+1] = rq->tail; | 378 | reg_state[CTX_RING_TAIL+1] = rq->tail; |
@@ -1425,7 +1425,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring) | |||
1425 | return ret; | 1425 | return ret; |
1426 | } | 1426 | } |
1427 | 1427 | ||
1428 | page = i915_gem_object_get_page(wa_ctx->obj, 0); | 1428 | page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0); |
1429 | batch = kmap_atomic(page); | 1429 | batch = kmap_atomic(page); |
1430 | offset = 0; | 1430 | offset = 0; |
1431 | 1431 | ||
@@ -1894,8 +1894,10 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) | |||
1894 | 1894 | ||
1895 | dev_priv = ring->dev->dev_private; | 1895 | dev_priv = ring->dev->dev_private; |
1896 | 1896 | ||
1897 | intel_logical_ring_stop(ring); | 1897 | if (ring->buffer) { |
1898 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); | 1898 | intel_logical_ring_stop(ring); |
1899 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); | ||
1900 | } | ||
1899 | 1901 | ||
1900 | if (ring->cleanup) | 1902 | if (ring->cleanup) |
1901 | ring->cleanup(ring); | 1903 | ring->cleanup(ring); |
@@ -1909,6 +1911,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) | |||
1909 | } | 1911 | } |
1910 | 1912 | ||
1911 | lrc_destroy_wa_ctx_obj(ring); | 1913 | lrc_destroy_wa_ctx_obj(ring); |
1914 | ring->dev = NULL; | ||
1912 | } | 1915 | } |
1913 | 1916 | ||
1914 | static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) | 1917 | static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) |
@@ -1931,11 +1934,11 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin | |||
1931 | 1934 | ||
1932 | ret = i915_cmd_parser_init_ring(ring); | 1935 | ret = i915_cmd_parser_init_ring(ring); |
1933 | if (ret) | 1936 | if (ret) |
1934 | return ret; | 1937 | goto error; |
1935 | 1938 | ||
1936 | ret = intel_lr_context_deferred_alloc(ring->default_context, ring); | 1939 | ret = intel_lr_context_deferred_alloc(ring->default_context, ring); |
1937 | if (ret) | 1940 | if (ret) |
1938 | return ret; | 1941 | goto error; |
1939 | 1942 | ||
1940 | /* As this is the default context, always pin it */ | 1943 | /* As this is the default context, always pin it */ |
1941 | ret = intel_lr_context_do_pin( | 1944 | ret = intel_lr_context_do_pin( |
@@ -1946,9 +1949,13 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin | |||
1946 | DRM_ERROR( | 1949 | DRM_ERROR( |
1947 | "Failed to pin and map ringbuffer %s: %d\n", | 1950 | "Failed to pin and map ringbuffer %s: %d\n", |
1948 | ring->name, ret); | 1951 | ring->name, ret); |
1949 | return ret; | 1952 | goto error; |
1950 | } | 1953 | } |
1951 | 1954 | ||
1955 | return 0; | ||
1956 | |||
1957 | error: | ||
1958 | intel_logical_ring_cleanup(ring); | ||
1952 | return ret; | 1959 | return ret; |
1953 | } | 1960 | } |
1954 | 1961 | ||
@@ -2257,7 +2264,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o | |||
2257 | 2264 | ||
2258 | /* The second page of the context object contains some fields which must | 2265 | /* The second page of the context object contains some fields which must |
2259 | * be set up prior to the first execution. */ | 2266 | * be set up prior to the first execution. */ |
2260 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); | 2267 | page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); |
2261 | reg_state = kmap_atomic(page); | 2268 | reg_state = kmap_atomic(page); |
2262 | 2269 | ||
2263 | /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM | 2270 | /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM |
@@ -2343,9 +2350,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o | |||
2343 | } | 2350 | } |
2344 | 2351 | ||
2345 | kunmap_atomic(reg_state); | 2352 | kunmap_atomic(reg_state); |
2346 | |||
2347 | ctx_obj->dirty = 1; | ||
2348 | set_page_dirty(page); | ||
2349 | i915_gem_object_unpin_pages(ctx_obj); | 2353 | i915_gem_object_unpin_pages(ctx_obj); |
2350 | 2354 | ||
2351 | return 0; | 2355 | return 0; |
@@ -2529,7 +2533,7 @@ void intel_lr_context_reset(struct drm_device *dev, | |||
2529 | WARN(1, "Failed get_pages for context obj\n"); | 2533 | WARN(1, "Failed get_pages for context obj\n"); |
2530 | continue; | 2534 | continue; |
2531 | } | 2535 | } |
2532 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); | 2536 | page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); |
2533 | reg_state = kmap_atomic(page); | 2537 | reg_state = kmap_atomic(page); |
2534 | 2538 | ||
2535 | reg_state[CTX_RING_HEAD+1] = 0; | 2539 | reg_state[CTX_RING_HEAD+1] = 0; |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index e362a30776fa..c15718b4862a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -26,6 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
29 | #include <linux/dmi.h> | ||
29 | #include <acpi/video.h> | 30 | #include <acpi/video.h> |
30 | 31 | ||
31 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
@@ -46,6 +47,7 @@ | |||
46 | #define OPREGION_SWSCI_OFFSET 0x200 | 47 | #define OPREGION_SWSCI_OFFSET 0x200 |
47 | #define OPREGION_ASLE_OFFSET 0x300 | 48 | #define OPREGION_ASLE_OFFSET 0x300 |
48 | #define OPREGION_VBT_OFFSET 0x400 | 49 | #define OPREGION_VBT_OFFSET 0x400 |
50 | #define OPREGION_ASLE_EXT_OFFSET 0x1C00 | ||
49 | 51 | ||
50 | #define OPREGION_SIGNATURE "IntelGraphicsMem" | 52 | #define OPREGION_SIGNATURE "IntelGraphicsMem" |
51 | #define MBOX_ACPI (1<<0) | 53 | #define MBOX_ACPI (1<<0) |
@@ -120,7 +122,16 @@ struct opregion_asle { | |||
120 | u64 fdss; | 122 | u64 fdss; |
121 | u32 fdsp; | 123 | u32 fdsp; |
122 | u32 stat; | 124 | u32 stat; |
123 | u8 rsvd[70]; | 125 | u64 rvda; /* Physical address of raw vbt data */ |
126 | u32 rvds; /* Size of raw vbt data */ | ||
127 | u8 rsvd[58]; | ||
128 | } __packed; | ||
129 | |||
130 | /* OpRegion mailbox #5: ASLE ext */ | ||
131 | struct opregion_asle_ext { | ||
132 | u32 phed; /* Panel Header */ | ||
133 | u8 bddc[256]; /* Panel EDID */ | ||
134 | u8 rsvd[764]; | ||
124 | } __packed; | 135 | } __packed; |
125 | 136 | ||
126 | /* Driver readiness indicator */ | 137 | /* Driver readiness indicator */ |
@@ -411,7 +422,7 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) | |||
411 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | 422 | static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) |
412 | { | 423 | { |
413 | struct drm_i915_private *dev_priv = dev->dev_private; | 424 | struct drm_i915_private *dev_priv = dev->dev_private; |
414 | struct intel_connector *intel_connector; | 425 | struct intel_connector *connector; |
415 | struct opregion_asle *asle = dev_priv->opregion.asle; | 426 | struct opregion_asle *asle = dev_priv->opregion.asle; |
416 | 427 | ||
417 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); | 428 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); |
@@ -435,8 +446,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
435 | * only one). | 446 | * only one). |
436 | */ | 447 | */ |
437 | DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); | 448 | DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); |
438 | list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) | 449 | for_each_intel_connector(dev, connector) |
439 | intel_panel_set_backlight_acpi(intel_connector, bclp, 255); | 450 | intel_panel_set_backlight_acpi(connector, bclp, 255); |
440 | asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; | 451 | asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; |
441 | 452 | ||
442 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | 453 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
@@ -826,6 +837,10 @@ void intel_opregion_fini(struct drm_device *dev) | |||
826 | 837 | ||
827 | /* just clear all opregion memory pointers now */ | 838 | /* just clear all opregion memory pointers now */ |
828 | memunmap(opregion->header); | 839 | memunmap(opregion->header); |
840 | if (opregion->rvda) { | ||
841 | memunmap(opregion->rvda); | ||
842 | opregion->rvda = NULL; | ||
843 | } | ||
829 | opregion->header = NULL; | 844 | opregion->header = NULL; |
830 | opregion->acpi = NULL; | 845 | opregion->acpi = NULL; |
831 | opregion->swsci = NULL; | 846 | opregion->swsci = NULL; |
@@ -894,6 +909,25 @@ static void swsci_setup(struct drm_device *dev) | |||
894 | static inline void swsci_setup(struct drm_device *dev) {} | 909 | static inline void swsci_setup(struct drm_device *dev) {} |
895 | #endif /* CONFIG_ACPI */ | 910 | #endif /* CONFIG_ACPI */ |
896 | 911 | ||
912 | static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) | ||
913 | { | ||
914 | DRM_DEBUG_KMS("Falling back to manually reading VBT from " | ||
915 | "VBIOS ROM for %s\n", id->ident); | ||
916 | return 1; | ||
917 | } | ||
918 | |||
919 | static const struct dmi_system_id intel_no_opregion_vbt[] = { | ||
920 | { | ||
921 | .callback = intel_no_opregion_vbt_callback, | ||
922 | .ident = "ThinkCentre A57", | ||
923 | .matches = { | ||
924 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
925 | DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), | ||
926 | }, | ||
927 | }, | ||
928 | { } | ||
929 | }; | ||
930 | |||
897 | int intel_opregion_setup(struct drm_device *dev) | 931 | int intel_opregion_setup(struct drm_device *dev) |
898 | { | 932 | { |
899 | struct drm_i915_private *dev_priv = dev->dev_private; | 933 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -907,6 +941,7 @@ int intel_opregion_setup(struct drm_device *dev) | |||
907 | BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); | 941 | BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); |
908 | BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100); | 942 | BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100); |
909 | BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); | 943 | BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); |
944 | BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); | ||
910 | 945 | ||
911 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); | 946 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); |
912 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); | 947 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); |
@@ -931,8 +966,6 @@ int intel_opregion_setup(struct drm_device *dev) | |||
931 | goto err_out; | 966 | goto err_out; |
932 | } | 967 | } |
933 | opregion->header = base; | 968 | opregion->header = base; |
934 | opregion->vbt = base + OPREGION_VBT_OFFSET; | ||
935 | |||
936 | opregion->lid_state = base + ACPI_CLID; | 969 | opregion->lid_state = base + ACPI_CLID; |
937 | 970 | ||
938 | mboxes = opregion->header->mboxes; | 971 | mboxes = opregion->header->mboxes; |
@@ -946,6 +979,7 @@ int intel_opregion_setup(struct drm_device *dev) | |||
946 | opregion->swsci = base + OPREGION_SWSCI_OFFSET; | 979 | opregion->swsci = base + OPREGION_SWSCI_OFFSET; |
947 | swsci_setup(dev); | 980 | swsci_setup(dev); |
948 | } | 981 | } |
982 | |||
949 | if (mboxes & MBOX_ASLE) { | 983 | if (mboxes & MBOX_ASLE) { |
950 | DRM_DEBUG_DRIVER("ASLE supported\n"); | 984 | DRM_DEBUG_DRIVER("ASLE supported\n"); |
951 | opregion->asle = base + OPREGION_ASLE_OFFSET; | 985 | opregion->asle = base + OPREGION_ASLE_OFFSET; |
@@ -953,6 +987,37 @@ int intel_opregion_setup(struct drm_device *dev) | |||
953 | opregion->asle->ardy = ASLE_ARDY_NOT_READY; | 987 | opregion->asle->ardy = ASLE_ARDY_NOT_READY; |
954 | } | 988 | } |
955 | 989 | ||
990 | if (mboxes & MBOX_ASLE_EXT) | ||
991 | DRM_DEBUG_DRIVER("ASLE extension supported\n"); | ||
992 | |||
993 | if (!dmi_check_system(intel_no_opregion_vbt)) { | ||
994 | const void *vbt = NULL; | ||
995 | u32 vbt_size = 0; | ||
996 | |||
997 | if (opregion->header->opregion_ver >= 2 && opregion->asle && | ||
998 | opregion->asle->rvda && opregion->asle->rvds) { | ||
999 | opregion->rvda = memremap(opregion->asle->rvda, | ||
1000 | opregion->asle->rvds, | ||
1001 | MEMREMAP_WB); | ||
1002 | vbt = opregion->rvda; | ||
1003 | vbt_size = opregion->asle->rvds; | ||
1004 | } | ||
1005 | |||
1006 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | ||
1007 | DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n"); | ||
1008 | opregion->vbt = vbt; | ||
1009 | opregion->vbt_size = vbt_size; | ||
1010 | } else { | ||
1011 | vbt = base + OPREGION_VBT_OFFSET; | ||
1012 | vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET; | ||
1013 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | ||
1014 | DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); | ||
1015 | opregion->vbt = vbt; | ||
1016 | opregion->vbt_size = vbt_size; | ||
1017 | } | ||
1018 | } | ||
1019 | } | ||
1020 | |||
956 | return 0; | 1021 | return 0; |
957 | 1022 | ||
958 | err_out: | 1023 | err_out: |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index ae808b68a44f..21ee6477bf98 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -461,8 +461,7 @@ static inline u32 scale_hw_to_user(struct intel_connector *connector, | |||
461 | static u32 intel_panel_compute_brightness(struct intel_connector *connector, | 461 | static u32 intel_panel_compute_brightness(struct intel_connector *connector, |
462 | u32 val) | 462 | u32 val) |
463 | { | 463 | { |
464 | struct drm_device *dev = connector->base.dev; | 464 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
465 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
466 | struct intel_panel *panel = &connector->panel; | 465 | struct intel_panel *panel = &connector->panel; |
467 | 466 | ||
468 | WARN_ON(panel->backlight.max == 0); | 467 | WARN_ON(panel->backlight.max == 0); |
@@ -480,45 +479,40 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, | |||
480 | 479 | ||
481 | static u32 lpt_get_backlight(struct intel_connector *connector) | 480 | static u32 lpt_get_backlight(struct intel_connector *connector) |
482 | { | 481 | { |
483 | struct drm_device *dev = connector->base.dev; | 482 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
484 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
485 | 483 | ||
486 | return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; | 484 | return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; |
487 | } | 485 | } |
488 | 486 | ||
489 | static u32 pch_get_backlight(struct intel_connector *connector) | 487 | static u32 pch_get_backlight(struct intel_connector *connector) |
490 | { | 488 | { |
491 | struct drm_device *dev = connector->base.dev; | 489 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
492 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
493 | 490 | ||
494 | return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | 491 | return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
495 | } | 492 | } |
496 | 493 | ||
497 | static u32 i9xx_get_backlight(struct intel_connector *connector) | 494 | static u32 i9xx_get_backlight(struct intel_connector *connector) |
498 | { | 495 | { |
499 | struct drm_device *dev = connector->base.dev; | 496 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
500 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
501 | struct intel_panel *panel = &connector->panel; | 497 | struct intel_panel *panel = &connector->panel; |
502 | u32 val; | 498 | u32 val; |
503 | 499 | ||
504 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | 500 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
505 | if (INTEL_INFO(dev)->gen < 4) | 501 | if (INTEL_INFO(dev_priv)->gen < 4) |
506 | val >>= 1; | 502 | val >>= 1; |
507 | 503 | ||
508 | if (panel->backlight.combination_mode) { | 504 | if (panel->backlight.combination_mode) { |
509 | u8 lbpc; | 505 | u8 lbpc; |
510 | 506 | ||
511 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); | 507 | pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc); |
512 | val *= lbpc; | 508 | val *= lbpc; |
513 | } | 509 | } |
514 | 510 | ||
515 | return val; | 511 | return val; |
516 | } | 512 | } |
517 | 513 | ||
518 | static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe) | 514 | static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe) |
519 | { | 515 | { |
520 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
521 | |||
522 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) | 516 | if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) |
523 | return 0; | 517 | return 0; |
524 | 518 | ||
@@ -527,17 +521,16 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe) | |||
527 | 521 | ||
528 | static u32 vlv_get_backlight(struct intel_connector *connector) | 522 | static u32 vlv_get_backlight(struct intel_connector *connector) |
529 | { | 523 | { |
530 | struct drm_device *dev = connector->base.dev; | 524 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
531 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 525 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
532 | 526 | ||
533 | return _vlv_get_backlight(dev, pipe); | 527 | return _vlv_get_backlight(dev_priv, pipe); |
534 | } | 528 | } |
535 | 529 | ||
536 | static u32 bxt_get_backlight(struct intel_connector *connector) | 530 | static u32 bxt_get_backlight(struct intel_connector *connector) |
537 | { | 531 | { |
538 | struct drm_device *dev = connector->base.dev; | 532 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
539 | struct intel_panel *panel = &connector->panel; | 533 | struct intel_panel *panel = &connector->panel; |
540 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
541 | 534 | ||
542 | return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller)); | 535 | return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller)); |
543 | } | 536 | } |
@@ -553,8 +546,7 @@ static u32 pwm_get_backlight(struct intel_connector *connector) | |||
553 | 546 | ||
554 | static u32 intel_panel_get_backlight(struct intel_connector *connector) | 547 | static u32 intel_panel_get_backlight(struct intel_connector *connector) |
555 | { | 548 | { |
556 | struct drm_device *dev = connector->base.dev; | 549 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
557 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
558 | struct intel_panel *panel = &connector->panel; | 550 | struct intel_panel *panel = &connector->panel; |
559 | u32 val = 0; | 551 | u32 val = 0; |
560 | 552 | ||
@@ -573,16 +565,14 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) | |||
573 | 565 | ||
574 | static void lpt_set_backlight(struct intel_connector *connector, u32 level) | 566 | static void lpt_set_backlight(struct intel_connector *connector, u32 level) |
575 | { | 567 | { |
576 | struct drm_device *dev = connector->base.dev; | 568 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
577 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
578 | u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; | 569 | u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; |
579 | I915_WRITE(BLC_PWM_PCH_CTL2, val | level); | 570 | I915_WRITE(BLC_PWM_PCH_CTL2, val | level); |
580 | } | 571 | } |
581 | 572 | ||
582 | static void pch_set_backlight(struct intel_connector *connector, u32 level) | 573 | static void pch_set_backlight(struct intel_connector *connector, u32 level) |
583 | { | 574 | { |
584 | struct drm_device *dev = connector->base.dev; | 575 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
585 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
586 | u32 tmp; | 576 | u32 tmp; |
587 | 577 | ||
588 | tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; | 578 | tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; |
@@ -591,8 +581,7 @@ static void pch_set_backlight(struct intel_connector *connector, u32 level) | |||
591 | 581 | ||
592 | static void i9xx_set_backlight(struct intel_connector *connector, u32 level) | 582 | static void i9xx_set_backlight(struct intel_connector *connector, u32 level) |
593 | { | 583 | { |
594 | struct drm_device *dev = connector->base.dev; | 584 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
595 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
596 | struct intel_panel *panel = &connector->panel; | 585 | struct intel_panel *panel = &connector->panel; |
597 | u32 tmp, mask; | 586 | u32 tmp, mask; |
598 | 587 | ||
@@ -603,10 +592,10 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level) | |||
603 | 592 | ||
604 | lbpc = level * 0xfe / panel->backlight.max + 1; | 593 | lbpc = level * 0xfe / panel->backlight.max + 1; |
605 | level /= lbpc; | 594 | level /= lbpc; |
606 | pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); | 595 | pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc); |
607 | } | 596 | } |
608 | 597 | ||
609 | if (IS_GEN4(dev)) { | 598 | if (IS_GEN4(dev_priv)) { |
610 | mask = BACKLIGHT_DUTY_CYCLE_MASK; | 599 | mask = BACKLIGHT_DUTY_CYCLE_MASK; |
611 | } else { | 600 | } else { |
612 | level <<= 1; | 601 | level <<= 1; |
@@ -619,8 +608,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level) | |||
619 | 608 | ||
620 | static void vlv_set_backlight(struct intel_connector *connector, u32 level) | 609 | static void vlv_set_backlight(struct intel_connector *connector, u32 level) |
621 | { | 610 | { |
622 | struct drm_device *dev = connector->base.dev; | 611 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
623 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
624 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 612 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
625 | u32 tmp; | 613 | u32 tmp; |
626 | 614 | ||
@@ -633,8 +621,7 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level) | |||
633 | 621 | ||
634 | static void bxt_set_backlight(struct intel_connector *connector, u32 level) | 622 | static void bxt_set_backlight(struct intel_connector *connector, u32 level) |
635 | { | 623 | { |
636 | struct drm_device *dev = connector->base.dev; | 624 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
637 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
638 | struct intel_panel *panel = &connector->panel; | 625 | struct intel_panel *panel = &connector->panel; |
639 | 626 | ||
640 | I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level); | 627 | I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level); |
@@ -663,8 +650,7 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level) | |||
663 | static void intel_panel_set_backlight(struct intel_connector *connector, | 650 | static void intel_panel_set_backlight(struct intel_connector *connector, |
664 | u32 user_level, u32 user_max) | 651 | u32 user_level, u32 user_max) |
665 | { | 652 | { |
666 | struct drm_device *dev = connector->base.dev; | 653 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
667 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
668 | struct intel_panel *panel = &connector->panel; | 654 | struct intel_panel *panel = &connector->panel; |
669 | u32 hw_level; | 655 | u32 hw_level; |
670 | 656 | ||
@@ -690,8 +676,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector, | |||
690 | void intel_panel_set_backlight_acpi(struct intel_connector *connector, | 676 | void intel_panel_set_backlight_acpi(struct intel_connector *connector, |
691 | u32 user_level, u32 user_max) | 677 | u32 user_level, u32 user_max) |
692 | { | 678 | { |
693 | struct drm_device *dev = connector->base.dev; | 679 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
694 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
695 | struct intel_panel *panel = &connector->panel; | 680 | struct intel_panel *panel = &connector->panel; |
696 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 681 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
697 | u32 hw_level; | 682 | u32 hw_level; |
@@ -726,8 +711,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector, | |||
726 | 711 | ||
727 | static void lpt_disable_backlight(struct intel_connector *connector) | 712 | static void lpt_disable_backlight(struct intel_connector *connector) |
728 | { | 713 | { |
729 | struct drm_device *dev = connector->base.dev; | 714 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
730 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
731 | u32 tmp; | 715 | u32 tmp; |
732 | 716 | ||
733 | intel_panel_actually_set_backlight(connector, 0); | 717 | intel_panel_actually_set_backlight(connector, 0); |
@@ -752,8 +736,7 @@ static void lpt_disable_backlight(struct intel_connector *connector) | |||
752 | 736 | ||
753 | static void pch_disable_backlight(struct intel_connector *connector) | 737 | static void pch_disable_backlight(struct intel_connector *connector) |
754 | { | 738 | { |
755 | struct drm_device *dev = connector->base.dev; | 739 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
756 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
757 | u32 tmp; | 740 | u32 tmp; |
758 | 741 | ||
759 | intel_panel_actually_set_backlight(connector, 0); | 742 | intel_panel_actually_set_backlight(connector, 0); |
@@ -772,8 +755,7 @@ static void i9xx_disable_backlight(struct intel_connector *connector) | |||
772 | 755 | ||
773 | static void i965_disable_backlight(struct intel_connector *connector) | 756 | static void i965_disable_backlight(struct intel_connector *connector) |
774 | { | 757 | { |
775 | struct drm_device *dev = connector->base.dev; | 758 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
776 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
777 | u32 tmp; | 759 | u32 tmp; |
778 | 760 | ||
779 | intel_panel_actually_set_backlight(connector, 0); | 761 | intel_panel_actually_set_backlight(connector, 0); |
@@ -784,8 +766,7 @@ static void i965_disable_backlight(struct intel_connector *connector) | |||
784 | 766 | ||
785 | static void vlv_disable_backlight(struct intel_connector *connector) | 767 | static void vlv_disable_backlight(struct intel_connector *connector) |
786 | { | 768 | { |
787 | struct drm_device *dev = connector->base.dev; | 769 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
788 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
789 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 770 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
790 | u32 tmp; | 771 | u32 tmp; |
791 | 772 | ||
@@ -800,8 +781,7 @@ static void vlv_disable_backlight(struct intel_connector *connector) | |||
800 | 781 | ||
801 | static void bxt_disable_backlight(struct intel_connector *connector) | 782 | static void bxt_disable_backlight(struct intel_connector *connector) |
802 | { | 783 | { |
803 | struct drm_device *dev = connector->base.dev; | 784 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
804 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
805 | struct intel_panel *panel = &connector->panel; | 785 | struct intel_panel *panel = &connector->panel; |
806 | u32 tmp, val; | 786 | u32 tmp, val; |
807 | 787 | ||
@@ -830,8 +810,7 @@ static void pwm_disable_backlight(struct intel_connector *connector) | |||
830 | 810 | ||
831 | void intel_panel_disable_backlight(struct intel_connector *connector) | 811 | void intel_panel_disable_backlight(struct intel_connector *connector) |
832 | { | 812 | { |
833 | struct drm_device *dev = connector->base.dev; | 813 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
834 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
835 | struct intel_panel *panel = &connector->panel; | 814 | struct intel_panel *panel = &connector->panel; |
836 | 815 | ||
837 | if (!panel->backlight.present) | 816 | if (!panel->backlight.present) |
@@ -843,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector) | |||
843 | * backlight. This will leave the backlight on unnecessarily when | 822 | * backlight. This will leave the backlight on unnecessarily when |
844 | * another client is not activated. | 823 | * another client is not activated. |
845 | */ | 824 | */ |
846 | if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { | 825 | if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { |
847 | DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); | 826 | DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); |
848 | return; | 827 | return; |
849 | } | 828 | } |
@@ -860,8 +839,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector) | |||
860 | 839 | ||
861 | static void lpt_enable_backlight(struct intel_connector *connector) | 840 | static void lpt_enable_backlight(struct intel_connector *connector) |
862 | { | 841 | { |
863 | struct drm_device *dev = connector->base.dev; | 842 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
864 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
865 | struct intel_panel *panel = &connector->panel; | 843 | struct intel_panel *panel = &connector->panel; |
866 | u32 pch_ctl1, pch_ctl2; | 844 | u32 pch_ctl1, pch_ctl2; |
867 | 845 | ||
@@ -893,8 +871,7 @@ static void lpt_enable_backlight(struct intel_connector *connector) | |||
893 | 871 | ||
894 | static void pch_enable_backlight(struct intel_connector *connector) | 872 | static void pch_enable_backlight(struct intel_connector *connector) |
895 | { | 873 | { |
896 | struct drm_device *dev = connector->base.dev; | 874 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
897 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
898 | struct intel_panel *panel = &connector->panel; | 875 | struct intel_panel *panel = &connector->panel; |
899 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 876 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
900 | enum transcoder cpu_transcoder = | 877 | enum transcoder cpu_transcoder = |
@@ -940,8 +917,7 @@ static void pch_enable_backlight(struct intel_connector *connector) | |||
940 | 917 | ||
941 | static void i9xx_enable_backlight(struct intel_connector *connector) | 918 | static void i9xx_enable_backlight(struct intel_connector *connector) |
942 | { | 919 | { |
943 | struct drm_device *dev = connector->base.dev; | 920 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
944 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
945 | struct intel_panel *panel = &connector->panel; | 921 | struct intel_panel *panel = &connector->panel; |
946 | u32 ctl, freq; | 922 | u32 ctl, freq; |
947 | 923 | ||
@@ -958,7 +934,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) | |||
958 | ctl = freq << 17; | 934 | ctl = freq << 17; |
959 | if (panel->backlight.combination_mode) | 935 | if (panel->backlight.combination_mode) |
960 | ctl |= BLM_LEGACY_MODE; | 936 | ctl |= BLM_LEGACY_MODE; |
961 | if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) | 937 | if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm) |
962 | ctl |= BLM_POLARITY_PNV; | 938 | ctl |= BLM_POLARITY_PNV; |
963 | 939 | ||
964 | I915_WRITE(BLC_PWM_CTL, ctl); | 940 | I915_WRITE(BLC_PWM_CTL, ctl); |
@@ -972,14 +948,13 @@ static void i9xx_enable_backlight(struct intel_connector *connector) | |||
972 | * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 | 948 | * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 |
973 | * that has backlight. | 949 | * that has backlight. |
974 | */ | 950 | */ |
975 | if (IS_GEN2(dev)) | 951 | if (IS_GEN2(dev_priv)) |
976 | I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); | 952 | I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); |
977 | } | 953 | } |
978 | 954 | ||
979 | static void i965_enable_backlight(struct intel_connector *connector) | 955 | static void i965_enable_backlight(struct intel_connector *connector) |
980 | { | 956 | { |
981 | struct drm_device *dev = connector->base.dev; | 957 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
982 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
983 | struct intel_panel *panel = &connector->panel; | 958 | struct intel_panel *panel = &connector->panel; |
984 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 959 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
985 | u32 ctl, ctl2, freq; | 960 | u32 ctl, ctl2, freq; |
@@ -1012,8 +987,7 @@ static void i965_enable_backlight(struct intel_connector *connector) | |||
1012 | 987 | ||
1013 | static void vlv_enable_backlight(struct intel_connector *connector) | 988 | static void vlv_enable_backlight(struct intel_connector *connector) |
1014 | { | 989 | { |
1015 | struct drm_device *dev = connector->base.dev; | 990 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1016 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1017 | struct intel_panel *panel = &connector->panel; | 991 | struct intel_panel *panel = &connector->panel; |
1018 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 992 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
1019 | u32 ctl, ctl2; | 993 | u32 ctl, ctl2; |
@@ -1044,8 +1018,7 @@ static void vlv_enable_backlight(struct intel_connector *connector) | |||
1044 | 1018 | ||
1045 | static void bxt_enable_backlight(struct intel_connector *connector) | 1019 | static void bxt_enable_backlight(struct intel_connector *connector) |
1046 | { | 1020 | { |
1047 | struct drm_device *dev = connector->base.dev; | 1021 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1048 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1049 | struct intel_panel *panel = &connector->panel; | 1022 | struct intel_panel *panel = &connector->panel; |
1050 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 1023 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
1051 | u32 pwm_ctl, val; | 1024 | u32 pwm_ctl, val; |
@@ -1102,8 +1075,7 @@ static void pwm_enable_backlight(struct intel_connector *connector) | |||
1102 | 1075 | ||
1103 | void intel_panel_enable_backlight(struct intel_connector *connector) | 1076 | void intel_panel_enable_backlight(struct intel_connector *connector) |
1104 | { | 1077 | { |
1105 | struct drm_device *dev = connector->base.dev; | 1078 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1106 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1107 | struct intel_panel *panel = &connector->panel; | 1079 | struct intel_panel *panel = &connector->panel; |
1108 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 1080 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
1109 | 1081 | ||
@@ -1278,8 +1250,7 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | |||
1278 | */ | 1250 | */ |
1279 | static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | 1251 | static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) |
1280 | { | 1252 | { |
1281 | struct drm_device *dev = connector->base.dev; | 1253 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1282 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1283 | u32 mul, clock; | 1254 | u32 mul, clock; |
1284 | 1255 | ||
1285 | if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY) | 1256 | if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY) |
@@ -1299,8 +1270,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | |||
1299 | */ | 1270 | */ |
1300 | static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | 1271 | static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) |
1301 | { | 1272 | { |
1302 | struct drm_device *dev = connector->base.dev; | 1273 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1303 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1304 | u32 mul, clock; | 1274 | u32 mul, clock; |
1305 | 1275 | ||
1306 | if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY) | 1276 | if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY) |
@@ -1393,8 +1363,7 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) | |||
1393 | 1363 | ||
1394 | static u32 get_backlight_max_vbt(struct intel_connector *connector) | 1364 | static u32 get_backlight_max_vbt(struct intel_connector *connector) |
1395 | { | 1365 | { |
1396 | struct drm_device *dev = connector->base.dev; | 1366 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1397 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1398 | struct intel_panel *panel = &connector->panel; | 1367 | struct intel_panel *panel = &connector->panel; |
1399 | u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; | 1368 | u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; |
1400 | u32 pwm; | 1369 | u32 pwm; |
@@ -1427,8 +1396,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector) | |||
1427 | */ | 1396 | */ |
1428 | static u32 get_backlight_min_vbt(struct intel_connector *connector) | 1397 | static u32 get_backlight_min_vbt(struct intel_connector *connector) |
1429 | { | 1398 | { |
1430 | struct drm_device *dev = connector->base.dev; | 1399 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1431 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1432 | struct intel_panel *panel = &connector->panel; | 1400 | struct intel_panel *panel = &connector->panel; |
1433 | int min; | 1401 | int min; |
1434 | 1402 | ||
@@ -1453,8 +1421,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) | |||
1453 | 1421 | ||
1454 | static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused) | 1422 | static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused) |
1455 | { | 1423 | { |
1456 | struct drm_device *dev = connector->base.dev; | 1424 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1457 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1458 | struct intel_panel *panel = &connector->panel; | 1425 | struct intel_panel *panel = &connector->panel; |
1459 | u32 pch_ctl1, pch_ctl2, val; | 1426 | u32 pch_ctl1, pch_ctl2, val; |
1460 | 1427 | ||
@@ -1483,8 +1450,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus | |||
1483 | 1450 | ||
1484 | static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused) | 1451 | static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused) |
1485 | { | 1452 | { |
1486 | struct drm_device *dev = connector->base.dev; | 1453 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1487 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1488 | struct intel_panel *panel = &connector->panel; | 1454 | struct intel_panel *panel = &connector->panel; |
1489 | u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; | 1455 | u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; |
1490 | 1456 | ||
@@ -1514,17 +1480,16 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus | |||
1514 | 1480 | ||
1515 | static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused) | 1481 | static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused) |
1516 | { | 1482 | { |
1517 | struct drm_device *dev = connector->base.dev; | 1483 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1518 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1519 | struct intel_panel *panel = &connector->panel; | 1484 | struct intel_panel *panel = &connector->panel; |
1520 | u32 ctl, val; | 1485 | u32 ctl, val; |
1521 | 1486 | ||
1522 | ctl = I915_READ(BLC_PWM_CTL); | 1487 | ctl = I915_READ(BLC_PWM_CTL); |
1523 | 1488 | ||
1524 | if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev)) | 1489 | if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) |
1525 | panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; | 1490 | panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; |
1526 | 1491 | ||
1527 | if (IS_PINEVIEW(dev)) | 1492 | if (IS_PINEVIEW(dev_priv)) |
1528 | panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; | 1493 | panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; |
1529 | 1494 | ||
1530 | panel->backlight.max = ctl >> 17; | 1495 | panel->backlight.max = ctl >> 17; |
@@ -1552,8 +1517,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu | |||
1552 | 1517 | ||
1553 | static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused) | 1518 | static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused) |
1554 | { | 1519 | { |
1555 | struct drm_device *dev = connector->base.dev; | 1520 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1556 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1557 | struct intel_panel *panel = &connector->panel; | 1521 | struct intel_panel *panel = &connector->panel; |
1558 | u32 ctl, ctl2, val; | 1522 | u32 ctl, ctl2, val; |
1559 | 1523 | ||
@@ -1586,8 +1550,7 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu | |||
1586 | 1550 | ||
1587 | static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe) | 1551 | static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe) |
1588 | { | 1552 | { |
1589 | struct drm_device *dev = connector->base.dev; | 1553 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1590 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1591 | struct intel_panel *panel = &connector->panel; | 1554 | struct intel_panel *panel = &connector->panel; |
1592 | u32 ctl, ctl2, val; | 1555 | u32 ctl, ctl2, val; |
1593 | 1556 | ||
@@ -1608,7 +1571,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe | |||
1608 | 1571 | ||
1609 | panel->backlight.min = get_backlight_min_vbt(connector); | 1572 | panel->backlight.min = get_backlight_min_vbt(connector); |
1610 | 1573 | ||
1611 | val = _vlv_get_backlight(dev, pipe); | 1574 | val = _vlv_get_backlight(dev_priv, pipe); |
1612 | panel->backlight.level = intel_panel_compute_brightness(connector, val); | 1575 | panel->backlight.level = intel_panel_compute_brightness(connector, val); |
1613 | 1576 | ||
1614 | panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) && | 1577 | panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) && |
@@ -1620,8 +1583,7 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe | |||
1620 | static int | 1583 | static int |
1621 | bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) | 1584 | bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) |
1622 | { | 1585 | { |
1623 | struct drm_device *dev = connector->base.dev; | 1586 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1624 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1625 | struct intel_panel *panel = &connector->panel; | 1587 | struct intel_panel *panel = &connector->panel; |
1626 | u32 pwm_ctl, val; | 1588 | u32 pwm_ctl, val; |
1627 | 1589 | ||
@@ -1699,8 +1661,7 @@ static int pwm_setup_backlight(struct intel_connector *connector, | |||
1699 | 1661 | ||
1700 | int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) | 1662 | int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) |
1701 | { | 1663 | { |
1702 | struct drm_device *dev = connector->dev; | 1664 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
1703 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1704 | struct intel_connector *intel_connector = to_intel_connector(connector); | 1665 | struct intel_connector *intel_connector = to_intel_connector(connector); |
1705 | struct intel_panel *panel = &intel_connector->panel; | 1666 | struct intel_panel *panel = &intel_connector->panel; |
1706 | int ret; | 1667 | int ret; |
@@ -1755,36 +1716,35 @@ void intel_panel_destroy_backlight(struct drm_connector *connector) | |||
1755 | static void | 1716 | static void |
1756 | intel_panel_init_backlight_funcs(struct intel_panel *panel) | 1717 | intel_panel_init_backlight_funcs(struct intel_panel *panel) |
1757 | { | 1718 | { |
1758 | struct intel_connector *intel_connector = | 1719 | struct intel_connector *connector = |
1759 | container_of(panel, struct intel_connector, panel); | 1720 | container_of(panel, struct intel_connector, panel); |
1760 | struct drm_device *dev = intel_connector->base.dev; | 1721 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1761 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1762 | 1722 | ||
1763 | if (IS_BROXTON(dev)) { | 1723 | if (IS_BROXTON(dev_priv)) { |
1764 | panel->backlight.setup = bxt_setup_backlight; | 1724 | panel->backlight.setup = bxt_setup_backlight; |
1765 | panel->backlight.enable = bxt_enable_backlight; | 1725 | panel->backlight.enable = bxt_enable_backlight; |
1766 | panel->backlight.disable = bxt_disable_backlight; | 1726 | panel->backlight.disable = bxt_disable_backlight; |
1767 | panel->backlight.set = bxt_set_backlight; | 1727 | panel->backlight.set = bxt_set_backlight; |
1768 | panel->backlight.get = bxt_get_backlight; | 1728 | panel->backlight.get = bxt_get_backlight; |
1769 | panel->backlight.hz_to_pwm = bxt_hz_to_pwm; | 1729 | panel->backlight.hz_to_pwm = bxt_hz_to_pwm; |
1770 | } else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) { | 1730 | } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) { |
1771 | panel->backlight.setup = lpt_setup_backlight; | 1731 | panel->backlight.setup = lpt_setup_backlight; |
1772 | panel->backlight.enable = lpt_enable_backlight; | 1732 | panel->backlight.enable = lpt_enable_backlight; |
1773 | panel->backlight.disable = lpt_disable_backlight; | 1733 | panel->backlight.disable = lpt_disable_backlight; |
1774 | panel->backlight.set = lpt_set_backlight; | 1734 | panel->backlight.set = lpt_set_backlight; |
1775 | panel->backlight.get = lpt_get_backlight; | 1735 | panel->backlight.get = lpt_get_backlight; |
1776 | if (HAS_PCH_LPT(dev)) | 1736 | if (HAS_PCH_LPT(dev_priv)) |
1777 | panel->backlight.hz_to_pwm = lpt_hz_to_pwm; | 1737 | panel->backlight.hz_to_pwm = lpt_hz_to_pwm; |
1778 | else | 1738 | else |
1779 | panel->backlight.hz_to_pwm = spt_hz_to_pwm; | 1739 | panel->backlight.hz_to_pwm = spt_hz_to_pwm; |
1780 | } else if (HAS_PCH_SPLIT(dev)) { | 1740 | } else if (HAS_PCH_SPLIT(dev_priv)) { |
1781 | panel->backlight.setup = pch_setup_backlight; | 1741 | panel->backlight.setup = pch_setup_backlight; |
1782 | panel->backlight.enable = pch_enable_backlight; | 1742 | panel->backlight.enable = pch_enable_backlight; |
1783 | panel->backlight.disable = pch_disable_backlight; | 1743 | panel->backlight.disable = pch_disable_backlight; |
1784 | panel->backlight.set = pch_set_backlight; | 1744 | panel->backlight.set = pch_set_backlight; |
1785 | panel->backlight.get = pch_get_backlight; | 1745 | panel->backlight.get = pch_get_backlight; |
1786 | panel->backlight.hz_to_pwm = pch_hz_to_pwm; | 1746 | panel->backlight.hz_to_pwm = pch_hz_to_pwm; |
1787 | } else if (IS_VALLEYVIEW(dev)) { | 1747 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1788 | if (dev_priv->vbt.has_mipi) { | 1748 | if (dev_priv->vbt.has_mipi) { |
1789 | panel->backlight.setup = pwm_setup_backlight; | 1749 | panel->backlight.setup = pwm_setup_backlight; |
1790 | panel->backlight.enable = pwm_enable_backlight; | 1750 | panel->backlight.enable = pwm_enable_backlight; |
@@ -1799,7 +1759,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) | |||
1799 | panel->backlight.get = vlv_get_backlight; | 1759 | panel->backlight.get = vlv_get_backlight; |
1800 | panel->backlight.hz_to_pwm = vlv_hz_to_pwm; | 1760 | panel->backlight.hz_to_pwm = vlv_hz_to_pwm; |
1801 | } | 1761 | } |
1802 | } else if (IS_GEN4(dev)) { | 1762 | } else if (IS_GEN4(dev_priv)) { |
1803 | panel->backlight.setup = i965_setup_backlight; | 1763 | panel->backlight.setup = i965_setup_backlight; |
1804 | panel->backlight.enable = i965_enable_backlight; | 1764 | panel->backlight.enable = i965_enable_backlight; |
1805 | panel->backlight.disable = i965_disable_backlight; | 1765 | panel->backlight.disable = i965_disable_backlight; |
@@ -1845,7 +1805,7 @@ void intel_backlight_register(struct drm_device *dev) | |||
1845 | { | 1805 | { |
1846 | struct intel_connector *connector; | 1806 | struct intel_connector *connector; |
1847 | 1807 | ||
1848 | list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) | 1808 | for_each_intel_connector(dev, connector) |
1849 | intel_backlight_device_register(connector); | 1809 | intel_backlight_device_register(connector); |
1850 | } | 1810 | } |
1851 | 1811 | ||
@@ -1853,6 +1813,6 @@ void intel_backlight_unregister(struct drm_device *dev) | |||
1853 | { | 1813 | { |
1854 | struct intel_connector *connector; | 1814 | struct intel_connector *connector; |
1855 | 1815 | ||
1856 | list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) | 1816 | for_each_intel_connector(dev, connector) |
1857 | intel_backlight_device_unregister(connector); | 1817 | intel_backlight_device_unregister(connector); |
1858 | } | 1818 | } |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ee05ce8bf79a..8d0d6f59a72b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -291,7 +291,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | |||
291 | struct drm_device *dev = dev_priv->dev; | 291 | struct drm_device *dev = dev_priv->dev; |
292 | u32 val; | 292 | u32 val; |
293 | 293 | ||
294 | if (IS_VALLEYVIEW(dev)) { | 294 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
295 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); | 295 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); |
296 | POSTING_READ(FW_BLC_SELF_VLV); | 296 | POSTING_READ(FW_BLC_SELF_VLV); |
297 | dev_priv->wm.vlv.cxsr = enable; | 297 | dev_priv->wm.vlv.cxsr = enable; |
@@ -3314,7 +3314,7 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv, | |||
3314 | struct drm_device *dev = dev_priv->dev; | 3314 | struct drm_device *dev = dev_priv->dev; |
3315 | struct intel_crtc *crtc; | 3315 | struct intel_crtc *crtc; |
3316 | 3316 | ||
3317 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { | 3317 | for_each_intel_crtc(dev, crtc) { |
3318 | int i, level, max_level = ilk_wm_max_level(dev); | 3318 | int i, level, max_level = ilk_wm_max_level(dev); |
3319 | enum pipe pipe = crtc->pipe; | 3319 | enum pipe pipe = crtc->pipe; |
3320 | 3320 | ||
@@ -3523,8 +3523,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev, | |||
3523 | * Otherwise, because of this_crtc being freshly enabled/disabled, the | 3523 | * Otherwise, because of this_crtc being freshly enabled/disabled, the |
3524 | * other active pipes need new DDB allocation and WM values. | 3524 | * other active pipes need new DDB allocation and WM values. |
3525 | */ | 3525 | */ |
3526 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | 3526 | for_each_intel_crtc(dev, intel_crtc) { |
3527 | base.head) { | ||
3528 | struct skl_pipe_wm pipe_wm = {}; | 3527 | struct skl_pipe_wm pipe_wm = {}; |
3529 | bool wm_changed; | 3528 | bool wm_changed; |
3530 | 3529 | ||
@@ -4405,7 +4404,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) | |||
4405 | 4404 | ||
4406 | mutex_lock(&dev_priv->rps.hw_lock); | 4405 | mutex_lock(&dev_priv->rps.hw_lock); |
4407 | if (dev_priv->rps.enabled) { | 4406 | if (dev_priv->rps.enabled) { |
4408 | if (IS_VALLEYVIEW(dev)) | 4407 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
4409 | vlv_set_rps_idle(dev_priv); | 4408 | vlv_set_rps_idle(dev_priv); |
4410 | else | 4409 | else |
4411 | gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); | 4410 | gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); |
@@ -4458,7 +4457,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, | |||
4458 | 4457 | ||
4459 | void intel_set_rps(struct drm_device *dev, u8 val) | 4458 | void intel_set_rps(struct drm_device *dev, u8 val) |
4460 | { | 4459 | { |
4461 | if (IS_VALLEYVIEW(dev)) | 4460 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
4462 | valleyview_set_rps(dev, val); | 4461 | valleyview_set_rps(dev, val); |
4463 | else | 4462 | else |
4464 | gen6_set_rps(dev, val); | 4463 | gen6_set_rps(dev, val); |
@@ -4502,7 +4501,7 @@ static void valleyview_disable_rps(struct drm_device *dev) | |||
4502 | 4501 | ||
4503 | static void intel_print_rc6_info(struct drm_device *dev, u32 mode) | 4502 | static void intel_print_rc6_info(struct drm_device *dev, u32 mode) |
4504 | { | 4503 | { |
4505 | if (IS_VALLEYVIEW(dev)) { | 4504 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
4506 | if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) | 4505 | if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) |
4507 | mode = GEN6_RC_CTL_RC6_ENABLE; | 4506 | mode = GEN6_RC_CTL_RC6_ENABLE; |
4508 | else | 4507 | else |
@@ -4673,8 +4672,7 @@ static void gen9_enable_rc6(struct drm_device *dev) | |||
4673 | /* 2b: Program RC6 thresholds.*/ | 4672 | /* 2b: Program RC6 thresholds.*/ |
4674 | 4673 | ||
4675 | /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ | 4674 | /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ |
4676 | if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && | 4675 | if (IS_SKYLAKE(dev)) |
4677 | IS_SKL_REVID(dev, 0, SKL_REVID_E0))) | ||
4678 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); | 4676 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); |
4679 | else | 4677 | else |
4680 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); | 4678 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); |
@@ -4717,7 +4715,7 @@ static void gen9_enable_rc6(struct drm_device *dev) | |||
4717 | */ | 4715 | */ |
4718 | if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || | 4716 | if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || |
4719 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && | 4717 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && |
4720 | IS_SKL_REVID(dev, 0, SKL_REVID_E0))) | 4718 | IS_SKL_REVID(dev, 0, SKL_REVID_F0))) |
4721 | I915_WRITE(GEN9_PG_ENABLE, 0); | 4719 | I915_WRITE(GEN9_PG_ENABLE, 0); |
4722 | else | 4720 | else |
4723 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? | 4721 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? |
@@ -5101,7 +5099,17 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) | |||
5101 | 5099 | ||
5102 | static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) | 5100 | static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) |
5103 | { | 5101 | { |
5104 | return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; | 5102 | u32 val; |
5103 | |||
5104 | val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; | ||
5105 | /* | ||
5106 | * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value | ||
5107 | * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on | ||
5108 | * a BYT-M B0 the above register contains 0xbf. Moreover when setting | ||
5109 | * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 | ||
5110 | * to make sure it matches what Punit accepts. | ||
5111 | */ | ||
5112 | return max_t(u32, val, 0xc0); | ||
5105 | } | 5113 | } |
5106 | 5114 | ||
5107 | /* Check that the pctx buffer wasn't move under us. */ | 5115 | /* Check that the pctx buffer wasn't move under us. */ |
@@ -6006,7 +6014,17 @@ static void intel_init_emon(struct drm_device *dev) | |||
6006 | 6014 | ||
6007 | void intel_init_gt_powersave(struct drm_device *dev) | 6015 | void intel_init_gt_powersave(struct drm_device *dev) |
6008 | { | 6016 | { |
6017 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6018 | |||
6009 | i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); | 6019 | i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); |
6020 | /* | ||
6021 | * RPM depends on RC6 to save restore the GT HW context, so make RC6 a | ||
6022 | * requirement. | ||
6023 | */ | ||
6024 | if (!i915.enable_rc6) { | ||
6025 | DRM_INFO("RC6 disabled, disabling runtime PM support\n"); | ||
6026 | intel_runtime_pm_get(dev_priv); | ||
6027 | } | ||
6010 | 6028 | ||
6011 | if (IS_CHERRYVIEW(dev)) | 6029 | if (IS_CHERRYVIEW(dev)) |
6012 | cherryview_init_gt_powersave(dev); | 6030 | cherryview_init_gt_powersave(dev); |
@@ -6016,10 +6034,15 @@ void intel_init_gt_powersave(struct drm_device *dev) | |||
6016 | 6034 | ||
6017 | void intel_cleanup_gt_powersave(struct drm_device *dev) | 6035 | void intel_cleanup_gt_powersave(struct drm_device *dev) |
6018 | { | 6036 | { |
6037 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6038 | |||
6019 | if (IS_CHERRYVIEW(dev)) | 6039 | if (IS_CHERRYVIEW(dev)) |
6020 | return; | 6040 | return; |
6021 | else if (IS_VALLEYVIEW(dev)) | 6041 | else if (IS_VALLEYVIEW(dev)) |
6022 | valleyview_cleanup_gt_powersave(dev); | 6042 | valleyview_cleanup_gt_powersave(dev); |
6043 | |||
6044 | if (!i915.enable_rc6) | ||
6045 | intel_runtime_pm_put(dev_priv); | ||
6023 | } | 6046 | } |
6024 | 6047 | ||
6025 | static void gen6_suspend_rps(struct drm_device *dev) | 6048 | static void gen6_suspend_rps(struct drm_device *dev) |
@@ -7223,4 +7246,6 @@ void intel_pm_setup(struct drm_device *dev) | |||
7223 | INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link); | 7246 | INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link); |
7224 | 7247 | ||
7225 | dev_priv->pm.suspended = false; | 7248 | dev_priv->pm.suspended = false; |
7249 | atomic_set(&dev_priv->pm.wakeref_count, 0); | ||
7250 | atomic_set(&dev_priv->pm.atomic_seq, 0); | ||
7226 | } | 7251 | } |
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index b6609e648f75..9ccff3011523 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -267,25 +267,20 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) | |||
267 | struct drm_i915_private *dev_priv = dev->dev_private; | 267 | struct drm_i915_private *dev_priv = dev->dev_private; |
268 | 268 | ||
269 | uint32_t max_sleep_time = 0x1f; | 269 | uint32_t max_sleep_time = 0x1f; |
270 | /* Lately it was identified that depending on panel idle frame count | 270 | /* |
271 | * calculated at HW can be off by 1. So let's use what came | 271 | * Let's respect VBT in case VBT asks a higher idle_frame value. |
272 | * from VBT + 1. | 272 | * Let's use 6 as the minimum to cover all known cases including |
273 | * There are also other cases where panel demands at least 4 | 273 | * the off-by-one issue that HW has in some cases. Also there are |
274 | * but VBT is not being set. To cover these 2 cases lets use | 274 | * cases where sink should be able to train |
275 | * at least 5 when VBT isn't set to be on the safest side. | 275 | * with the 5 or 6 idle patterns. |
276 | */ | 276 | */ |
277 | uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ? | 277 | uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); |
278 | dev_priv->vbt.psr.idle_frames + 1 : 5; | ||
279 | uint32_t val = 0x0; | 278 | uint32_t val = 0x0; |
280 | const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; | ||
281 | 279 | ||
282 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { | 280 | if (IS_HASWELL(dev)) |
283 | /* Sink should be able to train with the 5 or 6 idle patterns */ | 281 | val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; |
284 | idle_frames += 4; | ||
285 | } | ||
286 | 282 | ||
287 | I915_WRITE(EDP_PSR_CTL, val | | 283 | I915_WRITE(EDP_PSR_CTL, val | |
288 | (IS_BROADWELL(dev) ? 0 : link_entry_time) | | ||
289 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | | 284 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | |
290 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | | 285 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | |
291 | EDP_PSR_ENABLE); | 286 | EDP_PSR_ENABLE); |
@@ -332,8 +327,8 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) | |||
332 | return false; | 327 | return false; |
333 | } | 328 | } |
334 | 329 | ||
335 | if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) || | 330 | if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && |
336 | (dig_port->port != PORT_A))) { | 331 | ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) { |
337 | DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n"); | 332 | DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n"); |
338 | return false; | 333 | return false; |
339 | } | 334 | } |
@@ -650,7 +645,7 @@ void intel_psr_single_frame_update(struct drm_device *dev, | |||
650 | * Single frame update is already supported on BDW+ but it requires | 645 | * Single frame update is already supported on BDW+ but it requires |
651 | * many W/A and it isn't really needed. | 646 | * many W/A and it isn't really needed. |
652 | */ | 647 | */ |
653 | if (!IS_VALLEYVIEW(dev)) | 648 | if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) |
654 | return; | 649 | return; |
655 | 650 | ||
656 | mutex_lock(&dev_priv->psr.lock); | 651 | mutex_lock(&dev_priv->psr.lock); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 57d78f264b53..339701d7a9a5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -27,29 +27,13 @@ | |||
27 | * | 27 | * |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/log2.h> | ||
30 | #include <drm/drmP.h> | 31 | #include <drm/drmP.h> |
31 | #include "i915_drv.h" | 32 | #include "i915_drv.h" |
32 | #include <drm/i915_drm.h> | 33 | #include <drm/i915_drm.h> |
33 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
34 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
35 | 36 | ||
36 | bool | ||
37 | intel_ring_initialized(struct intel_engine_cs *ring) | ||
38 | { | ||
39 | struct drm_device *dev = ring->dev; | ||
40 | |||
41 | if (!dev) | ||
42 | return false; | ||
43 | |||
44 | if (i915.enable_execlists) { | ||
45 | struct intel_context *dctx = ring->default_context; | ||
46 | struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf; | ||
47 | |||
48 | return ringbuf->obj; | ||
49 | } else | ||
50 | return ring->buffer && ring->buffer->obj; | ||
51 | } | ||
52 | |||
53 | int __intel_ring_space(int head, int tail, int size) | 37 | int __intel_ring_space(int head, int tail, int size) |
54 | { | 38 | { |
55 | int space = head - tail; | 39 | int space = head - tail; |
@@ -995,7 +979,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring) | |||
995 | * Only consider slices where one, and only one, subslice has 7 | 979 | * Only consider slices where one, and only one, subslice has 7 |
996 | * EUs | 980 | * EUs |
997 | */ | 981 | */ |
998 | if (hweight8(dev_priv->info.subslice_7eu[i]) != 1) | 982 | if (!is_power_of_2(dev_priv->info.subslice_7eu[i])) |
999 | continue; | 983 | continue; |
1000 | 984 | ||
1001 | /* | 985 | /* |
@@ -1034,10 +1018,6 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) | |||
1034 | return ret; | 1018 | return ret; |
1035 | 1019 | ||
1036 | if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { | 1020 | if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { |
1037 | /* WaDisableHDCInvalidation:skl */ | ||
1038 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
1039 | BDW_DISABLE_HDC_INVALIDATION); | ||
1040 | |||
1041 | /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ | 1021 | /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ |
1042 | I915_WRITE(FF_SLICE_CS_CHICKEN2, | 1022 | I915_WRITE(FF_SLICE_CS_CHICKEN2, |
1043 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); | 1023 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); |
@@ -1062,7 +1042,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) | |||
1062 | WA_SET_BIT_MASKED(HIZ_CHICKEN, | 1042 | WA_SET_BIT_MASKED(HIZ_CHICKEN, |
1063 | BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); | 1043 | BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); |
1064 | 1044 | ||
1065 | if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { | 1045 | if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) { |
1066 | /* | 1046 | /* |
1067 | *Use Force Non-Coherent whenever executing a 3D context. This | 1047 | *Use Force Non-Coherent whenever executing a 3D context. This |
1068 | * is a workaround for a possible hang in the unlikely event | 1048 | * is a workaround for a possible hang in the unlikely event |
@@ -1071,6 +1051,10 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) | |||
1071 | /* WaForceEnableNonCoherent:skl */ | 1051 | /* WaForceEnableNonCoherent:skl */ |
1072 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | 1052 | WA_SET_BIT_MASKED(HDC_CHICKEN0, |
1073 | HDC_FORCE_NON_COHERENT); | 1053 | HDC_FORCE_NON_COHERENT); |
1054 | |||
1055 | /* WaDisableHDCInvalidation:skl */ | ||
1056 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
1057 | BDW_DISABLE_HDC_INVALIDATION); | ||
1074 | } | 1058 | } |
1075 | 1059 | ||
1076 | /* WaBarrierPerformanceFixDisable:skl */ | 1060 | /* WaBarrierPerformanceFixDisable:skl */ |
@@ -2167,8 +2151,10 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2167 | init_waitqueue_head(&ring->irq_queue); | 2151 | init_waitqueue_head(&ring->irq_queue); |
2168 | 2152 | ||
2169 | ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE); | 2153 | ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE); |
2170 | if (IS_ERR(ringbuf)) | 2154 | if (IS_ERR(ringbuf)) { |
2171 | return PTR_ERR(ringbuf); | 2155 | ret = PTR_ERR(ringbuf); |
2156 | goto error; | ||
2157 | } | ||
2172 | ring->buffer = ringbuf; | 2158 | ring->buffer = ringbuf; |
2173 | 2159 | ||
2174 | if (I915_NEED_GFX_HWS(dev)) { | 2160 | if (I915_NEED_GFX_HWS(dev)) { |
@@ -2197,8 +2183,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2197 | return 0; | 2183 | return 0; |
2198 | 2184 | ||
2199 | error: | 2185 | error: |
2200 | intel_ringbuffer_free(ringbuf); | 2186 | intel_cleanup_ring_buffer(ring); |
2201 | ring->buffer = NULL; | ||
2202 | return ret; | 2187 | return ret; |
2203 | } | 2188 | } |
2204 | 2189 | ||
@@ -2211,12 +2196,14 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) | |||
2211 | 2196 | ||
2212 | dev_priv = to_i915(ring->dev); | 2197 | dev_priv = to_i915(ring->dev); |
2213 | 2198 | ||
2214 | intel_stop_ring_buffer(ring); | 2199 | if (ring->buffer) { |
2215 | WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); | 2200 | intel_stop_ring_buffer(ring); |
2201 | WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); | ||
2216 | 2202 | ||
2217 | intel_unpin_ringbuffer_obj(ring->buffer); | 2203 | intel_unpin_ringbuffer_obj(ring->buffer); |
2218 | intel_ringbuffer_free(ring->buffer); | 2204 | intel_ringbuffer_free(ring->buffer); |
2219 | ring->buffer = NULL; | 2205 | ring->buffer = NULL; |
2206 | } | ||
2220 | 2207 | ||
2221 | if (ring->cleanup) | 2208 | if (ring->cleanup) |
2222 | ring->cleanup(ring); | 2209 | ring->cleanup(ring); |
@@ -2225,6 +2212,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) | |||
2225 | 2212 | ||
2226 | i915_cmd_parser_fini_ring(ring); | 2213 | i915_cmd_parser_fini_ring(ring); |
2227 | i915_gem_batch_pool_fini(&ring->batch_pool); | 2214 | i915_gem_batch_pool_fini(&ring->batch_pool); |
2215 | ring->dev = NULL; | ||
2228 | } | 2216 | } |
2229 | 2217 | ||
2230 | static int ring_wait_for_space(struct intel_engine_cs *ring, int n) | 2218 | static int ring_wait_for_space(struct intel_engine_cs *ring, int n) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 5d1eb206151d..49574ffe54bc 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -350,7 +350,11 @@ struct intel_engine_cs { | |||
350 | u32 (*get_cmd_length_mask)(u32 cmd_header); | 350 | u32 (*get_cmd_length_mask)(u32 cmd_header); |
351 | }; | 351 | }; |
352 | 352 | ||
353 | bool intel_ring_initialized(struct intel_engine_cs *ring); | 353 | static inline bool |
354 | intel_ring_initialized(struct intel_engine_cs *ring) | ||
355 | { | ||
356 | return ring->dev != NULL; | ||
357 | } | ||
354 | 358 | ||
355 | static inline unsigned | 359 | static inline unsigned |
356 | intel_ring_flag(struct intel_engine_cs *ring) | 360 | intel_ring_flag(struct intel_engine_cs *ring) |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 2c2151f1c47e..ddbdbffe829a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -538,8 +538,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) | |||
538 | 538 | ||
539 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), | 539 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), |
540 | "DC5 already programmed to be enabled.\n"); | 540 | "DC5 already programmed to be enabled.\n"); |
541 | WARN_ONCE(dev_priv->pm.suspended, | 541 | assert_rpm_wakelock_held(dev_priv); |
542 | "DC5 cannot be enabled, if platform is runtime-suspended.\n"); | ||
543 | 542 | ||
544 | assert_csr_loaded(dev_priv); | 543 | assert_csr_loaded(dev_priv); |
545 | } | 544 | } |
@@ -553,8 +552,7 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv) | |||
553 | if (dev_priv->power_domains.initializing) | 552 | if (dev_priv->power_domains.initializing) |
554 | return; | 553 | return; |
555 | 554 | ||
556 | WARN_ONCE(dev_priv->pm.suspended, | 555 | assert_rpm_wakelock_held(dev_priv); |
557 | "Disabling of DC5 while platform is runtime-suspended should never happen.\n"); | ||
558 | } | 556 | } |
559 | 557 | ||
560 | static void gen9_enable_dc5(struct drm_i915_private *dev_priv) | 558 | static void gen9_enable_dc5(struct drm_i915_private *dev_priv) |
@@ -1975,14 +1973,29 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) | |||
1975 | */ | 1973 | */ |
1976 | void intel_power_domains_fini(struct drm_i915_private *dev_priv) | 1974 | void intel_power_domains_fini(struct drm_i915_private *dev_priv) |
1977 | { | 1975 | { |
1978 | /* The i915.ko module is still not prepared to be loaded when | 1976 | struct device *device = &dev_priv->dev->pdev->dev; |
1977 | |||
1978 | /* | ||
1979 | * The i915.ko module is still not prepared to be loaded when | ||
1979 | * the power well is not enabled, so just enable it in case | 1980 | * the power well is not enabled, so just enable it in case |
1980 | * we're going to unload/reload. */ | 1981 | * we're going to unload/reload. |
1982 | * The following also reacquires the RPM reference the core passed | ||
1983 | * to the driver during loading, which is dropped in | ||
1984 | * intel_runtime_pm_enable(). We have to hand back the control of the | ||
1985 | * device to the core with this reference held. | ||
1986 | */ | ||
1981 | intel_display_set_init_power(dev_priv, true); | 1987 | intel_display_set_init_power(dev_priv, true); |
1982 | 1988 | ||
1983 | /* Remove the refcount we took to keep power well support disabled. */ | 1989 | /* Remove the refcount we took to keep power well support disabled. */ |
1984 | if (!i915.disable_power_well) | 1990 | if (!i915.disable_power_well) |
1985 | intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); | 1991 | intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); |
1992 | |||
1993 | /* | ||
1994 | * Remove the refcount we took in intel_runtime_pm_enable() in case | ||
1995 | * the platform doesn't support runtime PM. | ||
1996 | */ | ||
1997 | if (!HAS_RUNTIME_PM(dev_priv)) | ||
1998 | pm_runtime_put(device); | ||
1986 | } | 1999 | } |
1987 | 2000 | ||
1988 | static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) | 2001 | static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) |
@@ -2226,11 +2239,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) | |||
2226 | struct drm_device *dev = dev_priv->dev; | 2239 | struct drm_device *dev = dev_priv->dev; |
2227 | struct device *device = &dev->pdev->dev; | 2240 | struct device *device = &dev->pdev->dev; |
2228 | 2241 | ||
2229 | if (!HAS_RUNTIME_PM(dev)) | ||
2230 | return; | ||
2231 | |||
2232 | pm_runtime_get_sync(device); | 2242 | pm_runtime_get_sync(device); |
2233 | WARN(dev_priv->pm.suspended, "Device still suspended.\n"); | 2243 | |
2244 | atomic_inc(&dev_priv->pm.wakeref_count); | ||
2245 | assert_rpm_wakelock_held(dev_priv); | ||
2234 | } | 2246 | } |
2235 | 2247 | ||
2236 | /** | 2248 | /** |
@@ -2255,11 +2267,10 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) | |||
2255 | struct drm_device *dev = dev_priv->dev; | 2267 | struct drm_device *dev = dev_priv->dev; |
2256 | struct device *device = &dev->pdev->dev; | 2268 | struct device *device = &dev->pdev->dev; |
2257 | 2269 | ||
2258 | if (!HAS_RUNTIME_PM(dev)) | 2270 | assert_rpm_wakelock_held(dev_priv); |
2259 | return; | ||
2260 | |||
2261 | WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n"); | ||
2262 | pm_runtime_get_noresume(device); | 2271 | pm_runtime_get_noresume(device); |
2272 | |||
2273 | atomic_inc(&dev_priv->pm.wakeref_count); | ||
2263 | } | 2274 | } |
2264 | 2275 | ||
2265 | /** | 2276 | /** |
@@ -2275,8 +2286,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) | |||
2275 | struct drm_device *dev = dev_priv->dev; | 2286 | struct drm_device *dev = dev_priv->dev; |
2276 | struct device *device = &dev->pdev->dev; | 2287 | struct device *device = &dev->pdev->dev; |
2277 | 2288 | ||
2278 | if (!HAS_RUNTIME_PM(dev)) | 2289 | assert_rpm_wakelock_held(dev_priv); |
2279 | return; | 2290 | if (atomic_dec_and_test(&dev_priv->pm.wakeref_count)) |
2291 | atomic_inc(&dev_priv->pm.atomic_seq); | ||
2280 | 2292 | ||
2281 | pm_runtime_mark_last_busy(device); | 2293 | pm_runtime_mark_last_busy(device); |
2282 | pm_runtime_put_autosuspend(device); | 2294 | pm_runtime_put_autosuspend(device); |
@@ -2297,22 +2309,27 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) | |||
2297 | struct drm_device *dev = dev_priv->dev; | 2309 | struct drm_device *dev = dev_priv->dev; |
2298 | struct device *device = &dev->pdev->dev; | 2310 | struct device *device = &dev->pdev->dev; |
2299 | 2311 | ||
2300 | if (!HAS_RUNTIME_PM(dev)) | 2312 | pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ |
2301 | return; | 2313 | pm_runtime_mark_last_busy(device); |
2302 | 2314 | ||
2303 | /* | 2315 | /* |
2304 | * RPM depends on RC6 to save restore the GT HW context, so make RC6 a | 2316 | * Take a permanent reference to disable the RPM functionality and drop |
2305 | * requirement. | 2317 | * it only when unloading the driver. Use the low level get/put helpers, |
2318 | * so the driver's own RPM reference tracking asserts also work on | ||
2319 | * platforms without RPM support. | ||
2306 | */ | 2320 | */ |
2307 | if (!intel_enable_rc6(dev)) { | 2321 | if (!HAS_RUNTIME_PM(dev)) { |
2308 | DRM_INFO("RC6 disabled, disabling runtime PM support\n"); | 2322 | pm_runtime_dont_use_autosuspend(device); |
2309 | return; | 2323 | pm_runtime_get_sync(device); |
2324 | } else { | ||
2325 | pm_runtime_use_autosuspend(device); | ||
2310 | } | 2326 | } |
2311 | 2327 | ||
2312 | pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ | 2328 | /* |
2313 | pm_runtime_mark_last_busy(device); | 2329 | * The core calls the driver load handler with an RPM reference held. |
2314 | pm_runtime_use_autosuspend(device); | 2330 | * We drop that here and will reacquire it during unloading in |
2315 | 2331 | * intel_power_domains_fini(). | |
2332 | */ | ||
2316 | pm_runtime_put_autosuspend(device); | 2333 | pm_runtime_put_autosuspend(device); |
2317 | } | 2334 | } |
2318 | 2335 | ||
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index dbf421351b5c..4ff7a1f4183e 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -951,7 +951,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | |||
951 | if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) | 951 | if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) |
952 | return -EINVAL; | 952 | return -EINVAL; |
953 | 953 | ||
954 | if (IS_VALLEYVIEW(dev) && | 954 | if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && |
955 | set->flags & I915_SET_COLORKEY_DESTINATION) | 955 | set->flags & I915_SET_COLORKEY_DESTINATION) |
956 | return -EINVAL; | 956 | return -EINVAL; |
957 | 957 | ||
@@ -1086,7 +1086,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) | |||
1086 | intel_plane->max_downscale = 1; | 1086 | intel_plane->max_downscale = 1; |
1087 | } | 1087 | } |
1088 | 1088 | ||
1089 | if (IS_VALLEYVIEW(dev)) { | 1089 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1090 | intel_plane->update_plane = vlv_update_plane; | 1090 | intel_plane->update_plane = vlv_update_plane; |
1091 | intel_plane->disable_plane = vlv_disable_plane; | 1091 | intel_plane->disable_plane = vlv_disable_plane; |
1092 | 1092 | ||
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c2358ba78b30..277e60ae0e47 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -50,13 +50,6 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) | |||
50 | return "unknown"; | 50 | return "unknown"; |
51 | } | 51 | } |
52 | 52 | ||
53 | static void | ||
54 | assert_device_not_suspended(struct drm_i915_private *dev_priv) | ||
55 | { | ||
56 | WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, | ||
57 | "Device suspended\n"); | ||
58 | } | ||
59 | |||
60 | static inline void | 53 | static inline void |
61 | fw_domain_reset(const struct intel_uncore_forcewake_domain *d) | 54 | fw_domain_reset(const struct intel_uncore_forcewake_domain *d) |
62 | { | 55 | { |
@@ -236,7 +229,7 @@ static void intel_uncore_fw_release_timer(unsigned long arg) | |||
236 | struct intel_uncore_forcewake_domain *domain = (void *)arg; | 229 | struct intel_uncore_forcewake_domain *domain = (void *)arg; |
237 | unsigned long irqflags; | 230 | unsigned long irqflags; |
238 | 231 | ||
239 | assert_device_not_suspended(domain->i915); | 232 | assert_rpm_device_not_suspended(domain->i915); |
240 | 233 | ||
241 | spin_lock_irqsave(&domain->i915->uncore.lock, irqflags); | 234 | spin_lock_irqsave(&domain->i915->uncore.lock, irqflags); |
242 | if (WARN_ON(domain->wake_count == 0)) | 235 | if (WARN_ON(domain->wake_count == 0)) |
@@ -411,7 +404,7 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, | |||
411 | if (!dev_priv->uncore.funcs.force_wake_get) | 404 | if (!dev_priv->uncore.funcs.force_wake_get) |
412 | return; | 405 | return; |
413 | 406 | ||
414 | WARN_ON(dev_priv->pm.suspended); | 407 | assert_rpm_wakelock_held(dev_priv); |
415 | 408 | ||
416 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 409 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
417 | __intel_uncore_forcewake_get(dev_priv, fw_domains); | 410 | __intel_uncore_forcewake_get(dev_priv, fw_domains); |
@@ -628,7 +621,7 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) | |||
628 | 621 | ||
629 | #define GEN2_READ_HEADER(x) \ | 622 | #define GEN2_READ_HEADER(x) \ |
630 | u##x val = 0; \ | 623 | u##x val = 0; \ |
631 | assert_device_not_suspended(dev_priv); | 624 | assert_rpm_wakelock_held(dev_priv); |
632 | 625 | ||
633 | #define GEN2_READ_FOOTER \ | 626 | #define GEN2_READ_FOOTER \ |
634 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ | 627 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ |
@@ -670,7 +663,7 @@ __gen2_read(64) | |||
670 | u32 offset = i915_mmio_reg_offset(reg); \ | 663 | u32 offset = i915_mmio_reg_offset(reg); \ |
671 | unsigned long irqflags; \ | 664 | unsigned long irqflags; \ |
672 | u##x val = 0; \ | 665 | u##x val = 0; \ |
673 | assert_device_not_suspended(dev_priv); \ | 666 | assert_rpm_wakelock_held(dev_priv); \ |
674 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) | 667 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) |
675 | 668 | ||
676 | #define GEN6_READ_FOOTER \ | 669 | #define GEN6_READ_FOOTER \ |
@@ -803,7 +796,7 @@ __gen6_read(64) | |||
803 | #define VGPU_READ_HEADER(x) \ | 796 | #define VGPU_READ_HEADER(x) \ |
804 | unsigned long irqflags; \ | 797 | unsigned long irqflags; \ |
805 | u##x val = 0; \ | 798 | u##x val = 0; \ |
806 | assert_device_not_suspended(dev_priv); \ | 799 | assert_rpm_device_not_suspended(dev_priv); \ |
807 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) | 800 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) |
808 | 801 | ||
809 | #define VGPU_READ_FOOTER \ | 802 | #define VGPU_READ_FOOTER \ |
@@ -830,7 +823,7 @@ __vgpu_read(64) | |||
830 | 823 | ||
831 | #define GEN2_WRITE_HEADER \ | 824 | #define GEN2_WRITE_HEADER \ |
832 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ | 825 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
833 | assert_device_not_suspended(dev_priv); \ | 826 | assert_rpm_wakelock_held(dev_priv); \ |
834 | 827 | ||
835 | #define GEN2_WRITE_FOOTER | 828 | #define GEN2_WRITE_FOOTER |
836 | 829 | ||
@@ -870,7 +863,7 @@ __gen2_write(64) | |||
870 | u32 offset = i915_mmio_reg_offset(reg); \ | 863 | u32 offset = i915_mmio_reg_offset(reg); \ |
871 | unsigned long irqflags; \ | 864 | unsigned long irqflags; \ |
872 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ | 865 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
873 | assert_device_not_suspended(dev_priv); \ | 866 | assert_rpm_wakelock_held(dev_priv); \ |
874 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) | 867 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) |
875 | 868 | ||
876 | #define GEN6_WRITE_FOOTER \ | 869 | #define GEN6_WRITE_FOOTER \ |
@@ -1046,7 +1039,7 @@ __gen6_write(64) | |||
1046 | #define VGPU_WRITE_HEADER \ | 1039 | #define VGPU_WRITE_HEADER \ |
1047 | unsigned long irqflags; \ | 1040 | unsigned long irqflags; \ |
1048 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ | 1041 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
1049 | assert_device_not_suspended(dev_priv); \ | 1042 | assert_rpm_device_not_suspended(dev_priv); \ |
1050 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) | 1043 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) |
1051 | 1044 | ||
1052 | #define VGPU_WRITE_FOOTER \ | 1045 | #define VGPU_WRITE_FOOTER \ |
@@ -1115,7 +1108,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, | |||
1115 | d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); | 1108 | d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); |
1116 | } | 1109 | } |
1117 | 1110 | ||
1118 | if (IS_VALLEYVIEW(dev_priv)) | 1111 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
1119 | d->reg_post = FORCEWAKE_ACK_VLV; | 1112 | d->reg_post = FORCEWAKE_ACK_VLV; |
1120 | else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) | 1113 | else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) |
1121 | d->reg_post = ECOBUS; | 1114 | d->reg_post = ECOBUS; |
@@ -1148,7 +1141,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) | |||
1148 | FORCEWAKE_ACK_BLITTER_GEN9); | 1141 | FORCEWAKE_ACK_BLITTER_GEN9); |
1149 | fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, | 1142 | fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, |
1150 | FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); | 1143 | FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); |
1151 | } else if (IS_VALLEYVIEW(dev)) { | 1144 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1152 | dev_priv->uncore.funcs.force_wake_get = fw_domains_get; | 1145 | dev_priv->uncore.funcs.force_wake_get = fw_domains_get; |
1153 | if (!IS_CHERRYVIEW(dev)) | 1146 | if (!IS_CHERRYVIEW(dev)) |
1154 | dev_priv->uncore.funcs.force_wake_put = | 1147 | dev_priv->uncore.funcs.force_wake_put = |