author     Dave Airlie <airlied@redhat.com>  2016-05-04 03:25:30 -0400
committer  Dave Airlie <airlied@redhat.com>  2016-05-04 03:25:30 -0400
commit     fffb675106aef326bbd837612ad74c31ab060b93 (patch)
tree       d48fe2eaee9346b49b8bc3beec4d235124745f9e
parent     b89359bdf0f1e95a4c5f92300594ba9dde323fc4 (diff)
parent     5b4fd5b1111b1230cd037df3b314e7b36d45d483 (diff)
Merge tag 'drm-intel-next-2016-04-25' of git://anongit.freedesktop.org/drm-intel into drm-next
- more userptr cornercase fixes from Chris
- clean up and tune forcewake handling (Tvrtko)
- more underrun fixes from Ville, mostly for ilk to appease CI
- fix unclaimed register warnings on vlv/chv and enable the debug code to catch
them by default (Ville)
- skl gpu hang fixes for gt3/4 (Mika Kuoppala)
- edram improvements for gen9+ (Mika again)
- clean up gpu reset corner cases (Chris)
- fix ctx/ring machine deaths on snb/ilk (Chris)
- MOCS programming for all engines (Peter Antoine)
- robustify/clean up vlv/chv irq handler (Ville)
- split gen8+ irq handlers into ack/handle phase (Ville)
- tons of bxt rpm fixes (mostly around firmware interactions), from Imre
- hook up panel fitting for dsi panels (Ville)
- more runtime PM fixes all over from Imre
- shrinker polish (Chris)
- more guc fixes from Alex Dai and Dave Gordon
- tons of bugfixes and small polish all over (but with a big focus on bxt)
* tag 'drm-intel-next-2016-04-25' of git://anongit.freedesktop.org/drm-intel: (142 commits)
drm/i915: Update DRIVER_DATE to 20160425
drm/i915/bxt: Explicitly clear the Turbo control register
drm/i915: Correct the i915_frequency_info debugfs output
drm/i915: Macros to convert PM time interval values to microseconds
drm/i915: Make RPS EI/thresholds multiple of 25 on SNB-BDW
drm/i915: Fake HDMI live status
drm/i915/bxt: Force reprogramming a PHY with invalid HW state
drm/i915/bxt: Wait for PHY1 GRC done if PHY0 was already enabled
drm/i915/bxt: Use PHY0 GRC value for HW state verification
drm/i915: use dev_priv directly in gen8_ppgtt_notify_vgt
drm/i915/bxt: Enable DC5 during runtime resume
drm/i915/bxt: Sanitize DC state tracking during system resume
drm/i915/bxt: Don't uninit/init display core twice during system suspend/resume
drm/i915: Inline intel_suspend_complete
drm/i915/kbl: Don't WARN for expected secondary MISC IO power well request
drm/i915: Fix eDP low vswing for Broadwell
drm/i915: check for ERR_PTR from i915_gem_object_pin_map()
drm/i915/guc: local optimisations and updating comments
drm/i915/guc: drop cached copy of 'wq_head'
drm/i915/guc: keep GuC doorbell & process descriptor mapped in kernel
...
42 files changed, 2689 insertions, 1653 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 649a562ddf17..8f404103341d 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -1,3 +1,20 @@ | |||
1 | config DRM_I915_WERROR | ||
2 | bool "Force GCC to throw an error instead of a warning when compiling" | ||
3 | # As this may inadvertently break the build, only allow the user | ||
4 | # to shoot oneself in the foot iff they aim really hard | ||
5 | depends on EXPERT | ||
6 | # We use the dependency on !COMPILE_TEST to not be enabled in | ||
7 | # allmodconfig or allyesconfig configurations | ||
8 | depends on !COMPILE_TEST | ||
9 | default n | ||
10 | help | ||
11 | Add -Werror to the build flags for (and only for) i915.ko. | ||
12 | Do not enable this unless you are writing code for the i915.ko module. | ||
13 | |||
14 | Recommended for driver developers only. | ||
15 | |||
16 | If in doubt, say "N". | ||
17 | |||
1 | config DRM_I915_DEBUG | 18 | config DRM_I915_DEBUG |
2 | bool "Enable additional driver debugging" | 19 | bool "Enable additional driver debugging" |
3 | depends on DRM_I915 | 20 | depends on DRM_I915 |
@@ -10,3 +27,15 @@ config DRM_I915_DEBUG | |||
10 | 27 | ||
11 | If in doubt, say "N". | 28 | If in doubt, say "N". |
12 | 29 | ||
30 | config DRM_I915_DEBUG_GEM | ||
31 | bool "Insert extra checks into the GEM internals" | ||
32 | default n | ||
33 | depends on DRM_I915_WERROR | ||
34 | help | ||
35 | Enable extra sanity checks (including BUGs) along the GEM driver | ||
36 | paths that may slow the system down and if hit hang the machine. | ||
37 | |||
38 | Recommended for driver developers only. | ||
39 | |||
40 | If in doubt, say "N". | ||
41 | |||
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 7ffb51b0cbc2..0b88ba0f3c1f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -2,6 +2,8 @@ | |||
2 | # Makefile for the drm device driver. This driver provides support for the | 2 | # Makefile for the drm device driver. This driver provides support for the |
3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. | 3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. |
4 | 4 | ||
5 | subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror | ||
6 | |||
5 | # Please keep these build lists sorted! | 7 | # Please keep these build lists sorted! |
6 | 8 | ||
7 | # core driver code | 9 | # core driver code |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 85933940fe9c..4950d05d2948 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,27 +89,34 @@ static int i915_capabilities(struct seq_file *m, void *data) | |||
89 | return 0; | 89 | return 0; |
90 | } | 90 | } |
91 | 91 | ||
92 | static const char *get_pin_flag(struct drm_i915_gem_object *obj) | 92 | static const char get_active_flag(struct drm_i915_gem_object *obj) |
93 | { | 93 | { |
94 | if (obj->pin_display) | 94 | return obj->active ? '*' : ' '; |
95 | return "p"; | ||
96 | else | ||
97 | return " "; | ||
98 | } | 95 | } |
99 | 96 | ||
100 | static const char *get_tiling_flag(struct drm_i915_gem_object *obj) | 97 | static const char get_pin_flag(struct drm_i915_gem_object *obj) |
98 | { | ||
99 | return obj->pin_display ? 'p' : ' '; | ||
100 | } | ||
101 | |||
102 | static const char get_tiling_flag(struct drm_i915_gem_object *obj) | ||
101 | { | 103 | { |
102 | switch (obj->tiling_mode) { | 104 | switch (obj->tiling_mode) { |
103 | default: | 105 | default: |
104 | case I915_TILING_NONE: return " "; | 106 | case I915_TILING_NONE: return ' '; |
105 | case I915_TILING_X: return "X"; | 107 | case I915_TILING_X: return 'X'; |
106 | case I915_TILING_Y: return "Y"; | 108 | case I915_TILING_Y: return 'Y'; |
107 | } | 109 | } |
108 | } | 110 | } |
109 | 111 | ||
110 | static inline const char *get_global_flag(struct drm_i915_gem_object *obj) | 112 | static inline const char get_global_flag(struct drm_i915_gem_object *obj) |
111 | { | 113 | { |
112 | return i915_gem_obj_to_ggtt(obj) ? "g" : " "; | 114 | return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; |
115 | } | ||
116 | |||
117 | static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) | ||
118 | { | ||
119 | return obj->mapping ? 'M' : ' '; | ||
113 | } | 120 | } |
114 | 121 | ||
115 | static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) | 122 | static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) |
@@ -136,12 +143,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
136 | 143 | ||
137 | lockdep_assert_held(&obj->base.dev->struct_mutex); | 144 | lockdep_assert_held(&obj->base.dev->struct_mutex); |
138 | 145 | ||
139 | seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ", | 146 | seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ", |
140 | &obj->base, | 147 | &obj->base, |
141 | obj->active ? "*" : " ", | 148 | get_active_flag(obj), |
142 | get_pin_flag(obj), | 149 | get_pin_flag(obj), |
143 | get_tiling_flag(obj), | 150 | get_tiling_flag(obj), |
144 | get_global_flag(obj), | 151 | get_global_flag(obj), |
152 | get_pin_mapped_flag(obj), | ||
145 | obj->base.size / 1024, | 153 | obj->base.size / 1024, |
146 | obj->base.read_domains, | 154 | obj->base.read_domains, |
147 | obj->base.write_domain); | 155 | obj->base.write_domain); |
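
Note: the two hunks above switch the per-object status helpers from returning one-character strings to returning plain chars, so describe_obj() can emit a fixed-width flag field with %c instead of %s. Below is a minimal userspace sketch of the same pattern; the struct and field names are illustrative stand-ins, not the driver's types.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for drm_i915_gem_object; not the driver's type. */
struct obj {
	bool active;
	bool pin_display;
	int tiling;            /* 0 = none, 1 = X, 2 = Y */
	bool has_ggtt_vma;
	bool has_kernel_mapping;
};

/* Each helper returns a single status character, ' ' when the flag is
 * clear, so the caller can print a fixed-width field with %c. */
static char get_active_flag(const struct obj *o)     { return o->active ? '*' : ' '; }
static char get_pin_flag(const struct obj *o)        { return o->pin_display ? 'p' : ' '; }
static char get_global_flag(const struct obj *o)     { return o->has_ggtt_vma ? 'g' : ' '; }
static char get_pin_mapped_flag(const struct obj *o) { return o->has_kernel_mapping ? 'M' : ' '; }

static char get_tiling_flag(const struct obj *o)
{
	switch (o->tiling) {
	default:
	case 0: return ' ';
	case 1: return 'X';
	case 2: return 'Y';
	}
}

int main(void)
{
	struct obj o = { .active = true, .tiling = 1, .has_kernel_mapping = true };

	/* Mirrors the describe_obj() format change from %s%s%s%s to %c%c%c%c%c. */
	printf("[%c%c%c%c%c]\n",
	       get_active_flag(&o), get_pin_flag(&o), get_tiling_flag(&o),
	       get_global_flag(&o), get_pin_mapped_flag(&o));
	return 0;
}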
@@ -435,6 +443,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
435 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 443 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
436 | u32 count, mappable_count, purgeable_count; | 444 | u32 count, mappable_count, purgeable_count; |
437 | u64 size, mappable_size, purgeable_size; | 445 | u64 size, mappable_size, purgeable_size; |
446 | unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0; | ||
447 | u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0; | ||
438 | struct drm_i915_gem_object *obj; | 448 | struct drm_i915_gem_object *obj; |
439 | struct drm_file *file; | 449 | struct drm_file *file; |
440 | struct i915_vma *vma; | 450 | struct i915_vma *vma; |
@@ -468,6 +478,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
468 | size += obj->base.size, ++count; | 478 | size += obj->base.size, ++count; |
469 | if (obj->madv == I915_MADV_DONTNEED) | 479 | if (obj->madv == I915_MADV_DONTNEED) |
470 | purgeable_size += obj->base.size, ++purgeable_count; | 480 | purgeable_size += obj->base.size, ++purgeable_count; |
481 | if (obj->mapping) { | ||
482 | pin_mapped_count++; | ||
483 | pin_mapped_size += obj->base.size; | ||
484 | if (obj->pages_pin_count == 0) { | ||
485 | pin_mapped_purgeable_count++; | ||
486 | pin_mapped_purgeable_size += obj->base.size; | ||
487 | } | ||
488 | } | ||
471 | } | 489 | } |
472 | seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); | 490 | seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); |
473 | 491 | ||
@@ -485,6 +503,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
485 | purgeable_size += obj->base.size; | 503 | purgeable_size += obj->base.size; |
486 | ++purgeable_count; | 504 | ++purgeable_count; |
487 | } | 505 | } |
506 | if (obj->mapping) { | ||
507 | pin_mapped_count++; | ||
508 | pin_mapped_size += obj->base.size; | ||
509 | if (obj->pages_pin_count == 0) { | ||
510 | pin_mapped_purgeable_count++; | ||
511 | pin_mapped_purgeable_size += obj->base.size; | ||
512 | } | ||
513 | } | ||
488 | } | 514 | } |
489 | seq_printf(m, "%u purgeable objects, %llu bytes\n", | 515 | seq_printf(m, "%u purgeable objects, %llu bytes\n", |
490 | purgeable_count, purgeable_size); | 516 | purgeable_count, purgeable_size); |
@@ -492,6 +518,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data) | |||
492 | mappable_count, mappable_size); | 518 | mappable_count, mappable_size); |
493 | seq_printf(m, "%u fault mappable objects, %llu bytes\n", | 519 | seq_printf(m, "%u fault mappable objects, %llu bytes\n", |
494 | count, size); | 520 | count, size); |
521 | seq_printf(m, | ||
522 | "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n", | ||
523 | pin_mapped_count, pin_mapped_purgeable_count, | ||
524 | pin_mapped_size, pin_mapped_purgeable_size); | ||
495 | 525 | ||
496 | seq_printf(m, "%llu [%llu] gtt total\n", | 526 | seq_printf(m, "%llu [%llu] gtt total\n", |
497 | ggtt->base.total, ggtt->mappable_end - ggtt->base.start); | 527 | ggtt->base.total, ggtt->mappable_end - ggtt->base.start); |
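
Note: the object-info hunks add running totals for objects that hold a pinned kernel mapping (obj->mapping), with a second bucket for mappings whose pages are no longer pinned. A small sketch of that accounting, using made-up field names:

#include <stdio.h>
#include <stddef.h>

struct obj {
	unsigned long long size;
	void *mapping;               /* non-NULL when a kernel mapping exists */
	unsigned int pages_pin_count;
};

int main(void)
{
	struct obj objs[] = {
		{ 4096, (void *)1, 1 },
		{ 8192, (void *)1, 0 },  /* mapped, but its pages are no longer pinned */
		{ 4096, NULL,      0 },  /* not mapped, not counted */
	};
	unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
	unsigned long long pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
	size_t i;

	for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++) {
		if (!objs[i].mapping)
			continue;
		pin_mapped_count++;
		pin_mapped_size += objs[i].size;
		if (objs[i].pages_pin_count == 0) {
			pin_mapped_purgeable_count++;
			pin_mapped_purgeable_size += objs[i].size;
		}
	}

	printf("%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
	       pin_mapped_count, pin_mapped_purgeable_count,
	       pin_mapped_size, pin_mapped_purgeable_size);
	return 0;
}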
@@ -1216,12 +1246,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1216 | rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); | 1246 | rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); |
1217 | 1247 | ||
1218 | rpstat = I915_READ(GEN6_RPSTAT1); | 1248 | rpstat = I915_READ(GEN6_RPSTAT1); |
1219 | rpupei = I915_READ(GEN6_RP_CUR_UP_EI); | 1249 | rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; |
1220 | rpcurup = I915_READ(GEN6_RP_CUR_UP); | 1250 | rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; |
1221 | rpprevup = I915_READ(GEN6_RP_PREV_UP); | 1251 | rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; |
1222 | rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); | 1252 | rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; |
1223 | rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); | 1253 | rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; |
1224 | rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); | 1254 | rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; |
1225 | if (IS_GEN9(dev)) | 1255 | if (IS_GEN9(dev)) |
1226 | cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; | 1256 | cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; |
1227 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 1257 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
@@ -1261,21 +1291,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1261 | seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); | 1291 | seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); |
1262 | seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); | 1292 | seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); |
1263 | seq_printf(m, "CAGF: %dMHz\n", cagf); | 1293 | seq_printf(m, "CAGF: %dMHz\n", cagf); |
1264 | seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & | 1294 | seq_printf(m, "RP CUR UP EI: %d (%dus)\n", |
1265 | GEN6_CURICONT_MASK); | 1295 | rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei)); |
1266 | seq_printf(m, "RP CUR UP: %dus\n", rpcurup & | 1296 | seq_printf(m, "RP CUR UP: %d (%dus)\n", |
1267 | GEN6_CURBSYTAVG_MASK); | 1297 | rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup)); |
1268 | seq_printf(m, "RP PREV UP: %dus\n", rpprevup & | 1298 | seq_printf(m, "RP PREV UP: %d (%dus)\n", |
1269 | GEN6_CURBSYTAVG_MASK); | 1299 | rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup)); |
1270 | seq_printf(m, "Up threshold: %d%%\n", | 1300 | seq_printf(m, "Up threshold: %d%%\n", |
1271 | dev_priv->rps.up_threshold); | 1301 | dev_priv->rps.up_threshold); |
1272 | 1302 | ||
1273 | seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & | 1303 | seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", |
1274 | GEN6_CURIAVG_MASK); | 1304 | rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei)); |
1275 | seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & | 1305 | seq_printf(m, "RP CUR DOWN: %d (%dus)\n", |
1276 | GEN6_CURBSYTAVG_MASK); | 1306 | rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown)); |
1277 | seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & | 1307 | seq_printf(m, "RP PREV DOWN: %d (%dus)\n", |
1278 | GEN6_CURBSYTAVG_MASK); | 1308 | rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown)); |
1279 | seq_printf(m, "Down threshold: %d%%\n", | 1309 | seq_printf(m, "Down threshold: %d%%\n", |
1280 | dev_priv->rps.down_threshold); | 1310 | dev_priv->rps.down_threshold); |
1281 | 1311 | ||
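
Note: the frequency-info hunks mask the EI/threshold registers at read time and then print both the raw count and a derived time via GT_PM_INTERVAL_TO_US(). The sketch below only shows the shape of that output change; the 1.28 us-per-count unit is an assumption for illustration, since the real macro selects the unit per platform.

#include <stdio.h>

/* Illustration only: derive microseconds from a raw PM interval count,
 * assuming a 1.28 us-per-count unit. The driver's GT_PM_INTERVAL_TO_US()
 * picks the correct unit for the platform. */
static unsigned int pm_interval_to_us(unsigned int count)
{
	return count * 128 / 100;
}

int main(void)
{
	unsigned int rpupei = 66250;  /* pretend value read and masked from GEN6_RP_CUR_UP_EI */

	/* New debugfs style: raw count plus the derived time. */
	printf("RP CUR UP EI: %u (%uus)\n", rpupei, pm_interval_to_us(rpupei));
	return 0;
}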
@@ -1469,12 +1499,11 @@ static int i915_forcewake_domains(struct seq_file *m, void *data) | |||
1469 | struct drm_device *dev = node->minor->dev; | 1499 | struct drm_device *dev = node->minor->dev; |
1470 | struct drm_i915_private *dev_priv = dev->dev_private; | 1500 | struct drm_i915_private *dev_priv = dev->dev_private; |
1471 | struct intel_uncore_forcewake_domain *fw_domain; | 1501 | struct intel_uncore_forcewake_domain *fw_domain; |
1472 | int i; | ||
1473 | 1502 | ||
1474 | spin_lock_irq(&dev_priv->uncore.lock); | 1503 | spin_lock_irq(&dev_priv->uncore.lock); |
1475 | for_each_fw_domain(fw_domain, dev_priv, i) { | 1504 | for_each_fw_domain(fw_domain, dev_priv) { |
1476 | seq_printf(m, "%s.wake_count = %u\n", | 1505 | seq_printf(m, "%s.wake_count = %u\n", |
1477 | intel_uncore_forcewake_domain_to_str(i), | 1506 | intel_uncore_forcewake_domain_to_str(fw_domain->id), |
1478 | fw_domain->wake_count); | 1507 | fw_domain->wake_count); |
1479 | } | 1508 | } |
1480 | spin_unlock_irq(&dev_priv->uncore.lock); | 1509 | spin_unlock_irq(&dev_priv->uncore.lock); |
@@ -2405,10 +2434,11 @@ static int i915_llc(struct seq_file *m, void *data) | |||
2405 | struct drm_info_node *node = m->private; | 2434 | struct drm_info_node *node = m->private; |
2406 | struct drm_device *dev = node->minor->dev; | 2435 | struct drm_device *dev = node->minor->dev; |
2407 | struct drm_i915_private *dev_priv = dev->dev_private; | 2436 | struct drm_i915_private *dev_priv = dev->dev_private; |
2437 | const bool edram = INTEL_GEN(dev_priv) > 8; | ||
2408 | 2438 | ||
2409 | /* Size calculation for LLC is a bit of a pain. Ignore for now. */ | ||
2410 | seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); | 2439 | seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); |
2411 | seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); | 2440 | seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC", |
2441 | intel_uncore_edram_size(dev_priv)/1024/1024); | ||
2412 | 2442 | ||
2413 | return 0; | 2443 | return 0; |
2414 | } | 2444 | } |
@@ -4723,7 +4753,7 @@ i915_wedged_get(void *data, u64 *val) | |||
4723 | struct drm_device *dev = data; | 4753 | struct drm_device *dev = data; |
4724 | struct drm_i915_private *dev_priv = dev->dev_private; | 4754 | struct drm_i915_private *dev_priv = dev->dev_private; |
4725 | 4755 | ||
4726 | *val = atomic_read(&dev_priv->gpu_error.reset_counter); | 4756 | *val = i915_terminally_wedged(&dev_priv->gpu_error); |
4727 | 4757 | ||
4728 | return 0; | 4758 | return 0; |
4729 | } | 4759 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b377753717d1..5c7615041b31 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -257,13 +257,6 @@ static int i915_get_bridge_dev(struct drm_device *dev) | |||
257 | return 0; | 257 | return 0; |
258 | } | 258 | } |
259 | 259 | ||
260 | #define MCHBAR_I915 0x44 | ||
261 | #define MCHBAR_I965 0x48 | ||
262 | #define MCHBAR_SIZE (4*4096) | ||
263 | |||
264 | #define DEVEN_REG 0x54 | ||
265 | #define DEVEN_MCHBAR_EN (1 << 28) | ||
266 | |||
267 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | 260 | /* Allocate space for the MCH regs if needed, return nonzero on error */ |
268 | static int | 261 | static int |
269 | intel_alloc_mchbar_resource(struct drm_device *dev) | 262 | intel_alloc_mchbar_resource(struct drm_device *dev) |
@@ -325,7 +318,7 @@ intel_setup_mchbar(struct drm_device *dev) | |||
325 | dev_priv->mchbar_need_disable = false; | 318 | dev_priv->mchbar_need_disable = false; |
326 | 319 | ||
327 | if (IS_I915G(dev) || IS_I915GM(dev)) { | 320 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
328 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | 321 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); |
329 | enabled = !!(temp & DEVEN_MCHBAR_EN); | 322 | enabled = !!(temp & DEVEN_MCHBAR_EN); |
330 | } else { | 323 | } else { |
331 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | 324 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
@@ -343,7 +336,7 @@ intel_setup_mchbar(struct drm_device *dev) | |||
343 | 336 | ||
344 | /* Space is allocated or reserved, so enable it. */ | 337 | /* Space is allocated or reserved, so enable it. */ |
345 | if (IS_I915G(dev) || IS_I915GM(dev)) { | 338 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
346 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, | 339 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN, |
347 | temp | DEVEN_MCHBAR_EN); | 340 | temp | DEVEN_MCHBAR_EN); |
348 | } else { | 341 | } else { |
349 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | 342 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
@@ -356,17 +349,24 @@ intel_teardown_mchbar(struct drm_device *dev) | |||
356 | { | 349 | { |
357 | struct drm_i915_private *dev_priv = dev->dev_private; | 350 | struct drm_i915_private *dev_priv = dev->dev_private; |
358 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | 351 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
359 | u32 temp; | ||
360 | 352 | ||
361 | if (dev_priv->mchbar_need_disable) { | 353 | if (dev_priv->mchbar_need_disable) { |
362 | if (IS_I915G(dev) || IS_I915GM(dev)) { | 354 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
363 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | 355 | u32 deven_val; |
364 | temp &= ~DEVEN_MCHBAR_EN; | 356 | |
365 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); | 357 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN, |
358 | &deven_val); | ||
359 | deven_val &= ~DEVEN_MCHBAR_EN; | ||
360 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN, | ||
361 | deven_val); | ||
366 | } else { | 362 | } else { |
367 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | 363 | u32 mchbar_val; |
368 | temp &= ~1; | 364 | |
369 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); | 365 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, |
366 | &mchbar_val); | ||
367 | mchbar_val &= ~1; | ||
368 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, | ||
369 | mchbar_val); | ||
370 | } | 370 | } |
371 | } | 371 | } |
372 | 372 | ||
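
Note: the teardown hunk replaces the shared temp variable with per-branch locals and switches the i915G/i915GM path to the DEVEN register definition, keeping the usual config-space read-modify-write. Below is a self-contained sketch of that pattern against a fake config space; the accessors are stand-ins for pci_read_config_dword()/pci_write_config_dword().

#include <stdio.h>
#include <stdint.h>

#define DEVEN            0x54            /* bridge config-space offset, per the diff */
#define DEVEN_MCHBAR_EN  (1u << 28)

/* Fake 256-byte config space standing in for the bridge device. */
static uint8_t cfg[256];

static void read_config_dword(unsigned int off, uint32_t *val)
{
	*val = (uint32_t)cfg[off] | (uint32_t)cfg[off + 1] << 8 |
	       (uint32_t)cfg[off + 2] << 16 | (uint32_t)cfg[off + 3] << 24;
}

static void write_config_dword(unsigned int off, uint32_t val)
{
	cfg[off]     = (uint8_t)val;
	cfg[off + 1] = (uint8_t)(val >> 8);
	cfg[off + 2] = (uint8_t)(val >> 16);
	cfg[off + 3] = (uint8_t)(val >> 24);
}

int main(void)
{
	uint32_t deven_val;

	/* Setup path: MCHBAR was enabled earlier. */
	write_config_dword(DEVEN, DEVEN_MCHBAR_EN | 0x1);

	/* Teardown path: clear only the MCHBAR enable bit, leave the rest. */
	read_config_dword(DEVEN, &deven_val);
	deven_val &= ~DEVEN_MCHBAR_EN;
	write_config_dword(DEVEN, deven_val);

	read_config_dword(DEVEN, &deven_val);
	printf("DEVEN after teardown: 0x%08x\n", (unsigned int)deven_val);
	return 0;
}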
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 29b4e79c85a6..d37c0a671eed 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -567,10 +567,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv) | |||
567 | drm_modeset_unlock_all(dev); | 567 | drm_modeset_unlock_all(dev); |
568 | } | 568 | } |
569 | 569 | ||
570 | static int intel_suspend_complete(struct drm_i915_private *dev_priv); | ||
571 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, | 570 | static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
572 | bool rpm_resume); | 571 | bool rpm_resume); |
573 | static int bxt_resume_prepare(struct drm_i915_private *dev_priv); | 572 | static int vlv_suspend_complete(struct drm_i915_private *dev_priv); |
574 | 573 | ||
575 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) | 574 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) |
576 | { | 575 | { |
@@ -640,8 +639,7 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
640 | 639 | ||
641 | intel_display_set_init_power(dev_priv, false); | 640 | intel_display_set_init_power(dev_priv, false); |
642 | 641 | ||
643 | if (HAS_CSR(dev_priv)) | 642 | intel_csr_ucode_suspend(dev_priv); |
644 | flush_work(&dev_priv->csr.work); | ||
645 | 643 | ||
646 | out: | 644 | out: |
647 | enable_rpm_wakeref_asserts(dev_priv); | 645 | enable_rpm_wakeref_asserts(dev_priv); |
@@ -657,7 +655,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) | |||
657 | 655 | ||
658 | disable_rpm_wakeref_asserts(dev_priv); | 656 | disable_rpm_wakeref_asserts(dev_priv); |
659 | 657 | ||
660 | fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; | 658 | fw_csr = !IS_BROXTON(dev_priv) && |
659 | suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; | ||
661 | /* | 660 | /* |
662 | * In case of firmware assisted context save/restore don't manually | 661 | * In case of firmware assisted context save/restore don't manually |
663 | * deinit the power domains. This also means the CSR/DMC firmware will | 662 | * deinit the power domains. This also means the CSR/DMC firmware will |
@@ -668,7 +667,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) | |||
668 | if (!fw_csr) | 667 | if (!fw_csr) |
669 | intel_power_domains_suspend(dev_priv); | 668 | intel_power_domains_suspend(dev_priv); |
670 | 669 | ||
671 | ret = intel_suspend_complete(dev_priv); | 670 | ret = 0; |
671 | if (IS_BROXTON(dev_priv)) | ||
672 | bxt_enable_dc9(dev_priv); | ||
673 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | ||
674 | hsw_enable_pc8(dev_priv); | ||
675 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
676 | ret = vlv_suspend_complete(dev_priv); | ||
672 | 677 | ||
673 | if (ret) { | 678 | if (ret) { |
674 | DRM_ERROR("Suspend complete failed: %d\n", ret); | 679 | DRM_ERROR("Suspend complete failed: %d\n", ret); |
@@ -732,6 +737,8 @@ static int i915_drm_resume(struct drm_device *dev) | |||
732 | 737 | ||
733 | disable_rpm_wakeref_asserts(dev_priv); | 738 | disable_rpm_wakeref_asserts(dev_priv); |
734 | 739 | ||
740 | intel_csr_ucode_resume(dev_priv); | ||
741 | |||
735 | mutex_lock(&dev->struct_mutex); | 742 | mutex_lock(&dev->struct_mutex); |
736 | i915_gem_restore_gtt_mappings(dev); | 743 | i915_gem_restore_gtt_mappings(dev); |
737 | mutex_unlock(&dev->struct_mutex); | 744 | mutex_unlock(&dev->struct_mutex); |
@@ -802,7 +809,7 @@ static int i915_drm_resume(struct drm_device *dev) | |||
802 | static int i915_drm_resume_early(struct drm_device *dev) | 809 | static int i915_drm_resume_early(struct drm_device *dev) |
803 | { | 810 | { |
804 | struct drm_i915_private *dev_priv = dev->dev_private; | 811 | struct drm_i915_private *dev_priv = dev->dev_private; |
805 | int ret = 0; | 812 | int ret; |
806 | 813 | ||
807 | /* | 814 | /* |
808 | * We have a resume ordering issue with the snd-hda driver also | 815 | * We have a resume ordering issue with the snd-hda driver also |
@@ -813,6 +820,36 @@ static int i915_drm_resume_early(struct drm_device *dev) | |||
813 | * FIXME: This should be solved with a special hdmi sink device or | 820 | * FIXME: This should be solved with a special hdmi sink device or |
814 | * similar so that power domains can be employed. | 821 | * similar so that power domains can be employed. |
815 | */ | 822 | */ |
823 | |||
824 | /* | ||
825 | * Note that we need to set the power state explicitly, since we | ||
826 | * powered off the device during freeze and the PCI core won't power | ||
827 | * it back up for us during thaw. Powering off the device during | ||
828 | * freeze is not a hard requirement though, and during the | ||
829 | * suspend/resume phases the PCI core makes sure we get here with the | ||
830 | * device powered on. So in case we change our freeze logic and keep | ||
831 | * the device powered we can also remove the following set power state | ||
832 | * call. | ||
833 | */ | ||
834 | ret = pci_set_power_state(dev->pdev, PCI_D0); | ||
835 | if (ret) { | ||
836 | DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); | ||
837 | goto out; | ||
838 | } | ||
839 | |||
840 | /* | ||
841 | * Note that pci_enable_device() first enables any parent bridge | ||
842 | * device and only then sets the power state for this device. The | ||
843 | * bridge enabling is a nop though, since bridge devices are resumed | ||
844 | * first. The order of enabling power and enabling the device is | ||
845 | * imposed by the PCI core as described above, so here we preserve the | ||
846 | * same order for the freeze/thaw phases. | ||
847 | * | ||
848 | * TODO: eventually we should remove pci_disable_device() / | ||
849 | * pci_enable_enable_device() from suspend/resume. Due to how they | ||
850 | * depend on the device enable refcount we can't anyway depend on them | ||
851 | * disabling/enabling the device. | ||
852 | */ | ||
816 | if (pci_enable_device(dev->pdev)) { | 853 | if (pci_enable_device(dev->pdev)) { |
817 | ret = -EIO; | 854 | ret = -EIO; |
818 | goto out; | 855 | goto out; |
@@ -830,21 +867,25 @@ static int i915_drm_resume_early(struct drm_device *dev) | |||
830 | 867 | ||
831 | intel_uncore_early_sanitize(dev, true); | 868 | intel_uncore_early_sanitize(dev, true); |
832 | 869 | ||
833 | if (IS_BROXTON(dev)) | 870 | if (IS_BROXTON(dev)) { |
834 | ret = bxt_resume_prepare(dev_priv); | 871 | if (!dev_priv->suspended_to_idle) |
835 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 872 | gen9_sanitize_dc_state(dev_priv); |
873 | bxt_disable_dc9(dev_priv); | ||
874 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | ||
836 | hsw_disable_pc8(dev_priv); | 875 | hsw_disable_pc8(dev_priv); |
876 | } | ||
837 | 877 | ||
838 | intel_uncore_sanitize(dev); | 878 | intel_uncore_sanitize(dev); |
839 | 879 | ||
840 | if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) | 880 | if (IS_BROXTON(dev_priv) || |
881 | !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) | ||
841 | intel_power_domains_init_hw(dev_priv, true); | 882 | intel_power_domains_init_hw(dev_priv, true); |
842 | 883 | ||
884 | enable_rpm_wakeref_asserts(dev_priv); | ||
885 | |||
843 | out: | 886 | out: |
844 | dev_priv->suspended_to_idle = false; | 887 | dev_priv->suspended_to_idle = false; |
845 | 888 | ||
846 | enable_rpm_wakeref_asserts(dev_priv); | ||
847 | |||
848 | return ret; | 889 | return ret; |
849 | } | 890 | } |
850 | 891 | ||
@@ -880,23 +921,32 @@ int i915_resume_switcheroo(struct drm_device *dev) | |||
880 | int i915_reset(struct drm_device *dev) | 921 | int i915_reset(struct drm_device *dev) |
881 | { | 922 | { |
882 | struct drm_i915_private *dev_priv = dev->dev_private; | 923 | struct drm_i915_private *dev_priv = dev->dev_private; |
883 | bool simulated; | 924 | struct i915_gpu_error *error = &dev_priv->gpu_error; |
925 | unsigned reset_counter; | ||
884 | int ret; | 926 | int ret; |
885 | 927 | ||
886 | intel_reset_gt_powersave(dev); | 928 | intel_reset_gt_powersave(dev); |
887 | 929 | ||
888 | mutex_lock(&dev->struct_mutex); | 930 | mutex_lock(&dev->struct_mutex); |
889 | 931 | ||
890 | i915_gem_reset(dev); | 932 | /* Clear any previous failed attempts at recovery. Time to try again. */ |
933 | atomic_andnot(I915_WEDGED, &error->reset_counter); | ||
891 | 934 | ||
892 | simulated = dev_priv->gpu_error.stop_rings != 0; | 935 | /* Clear the reset-in-progress flag and increment the reset epoch. */ |
936 | reset_counter = atomic_inc_return(&error->reset_counter); | ||
937 | if (WARN_ON(__i915_reset_in_progress(reset_counter))) { | ||
938 | ret = -EIO; | ||
939 | goto error; | ||
940 | } | ||
941 | |||
942 | i915_gem_reset(dev); | ||
893 | 943 | ||
894 | ret = intel_gpu_reset(dev, ALL_ENGINES); | 944 | ret = intel_gpu_reset(dev, ALL_ENGINES); |
895 | 945 | ||
896 | /* Also reset the gpu hangman. */ | 946 | /* Also reset the gpu hangman. */ |
897 | if (simulated) { | 947 | if (error->stop_rings != 0) { |
898 | DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); | 948 | DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); |
899 | dev_priv->gpu_error.stop_rings = 0; | 949 | error->stop_rings = 0; |
900 | if (ret == -ENODEV) { | 950 | if (ret == -ENODEV) { |
901 | DRM_INFO("Reset not implemented, but ignoring " | 951 | DRM_INFO("Reset not implemented, but ignoring " |
902 | "error for simulated gpu hangs\n"); | 952 | "error for simulated gpu hangs\n"); |
@@ -908,9 +958,11 @@ int i915_reset(struct drm_device *dev) | |||
908 | pr_notice("drm/i915: Resetting chip after gpu hang\n"); | 958 | pr_notice("drm/i915: Resetting chip after gpu hang\n"); |
909 | 959 | ||
910 | if (ret) { | 960 | if (ret) { |
911 | DRM_ERROR("Failed to reset chip: %i\n", ret); | 961 | if (ret != -ENODEV) |
912 | mutex_unlock(&dev->struct_mutex); | 962 | DRM_ERROR("Failed to reset chip: %i\n", ret); |
913 | return ret; | 963 | else |
964 | DRM_DEBUG_DRIVER("GPU reset disabled\n"); | ||
965 | goto error; | ||
914 | } | 966 | } |
915 | 967 | ||
916 | intel_overlay_reset(dev_priv); | 968 | intel_overlay_reset(dev_priv); |
@@ -929,20 +981,14 @@ int i915_reset(struct drm_device *dev) | |||
929 | * was running at the time of the reset (i.e. we weren't VT | 981 | * was running at the time of the reset (i.e. we weren't VT |
930 | * switched away). | 982 | * switched away). |
931 | */ | 983 | */ |
932 | |||
933 | /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ | ||
934 | dev_priv->gpu_error.reload_in_reset = true; | ||
935 | |||
936 | ret = i915_gem_init_hw(dev); | 984 | ret = i915_gem_init_hw(dev); |
937 | |||
938 | dev_priv->gpu_error.reload_in_reset = false; | ||
939 | |||
940 | mutex_unlock(&dev->struct_mutex); | ||
941 | if (ret) { | 985 | if (ret) { |
942 | DRM_ERROR("Failed hw init on reset %d\n", ret); | 986 | DRM_ERROR("Failed hw init on reset %d\n", ret); |
943 | return ret; | 987 | goto error; |
944 | } | 988 | } |
945 | 989 | ||
990 | mutex_unlock(&dev->struct_mutex); | ||
991 | |||
946 | /* | 992 | /* |
947 | * rps/rc6 re-init is necessary to restore state lost after the | 993 | * rps/rc6 re-init is necessary to restore state lost after the |
948 | * reset and the re-install of gt irqs. Skip for ironlake per | 994 | * reset and the re-install of gt irqs. Skip for ironlake per |
@@ -953,6 +999,11 @@ int i915_reset(struct drm_device *dev) | |||
953 | intel_enable_gt_powersave(dev); | 999 | intel_enable_gt_powersave(dev); |
954 | 1000 | ||
955 | return 0; | 1001 | return 0; |
1002 | |||
1003 | error: | ||
1004 | atomic_or(I915_WEDGED, &error->reset_counter); | ||
1005 | mutex_unlock(&dev->struct_mutex); | ||
1006 | return ret; | ||
956 | } | 1007 | } |
957 | 1008 | ||
958 | static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 1009 | static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
@@ -1059,44 +1110,6 @@ static int i915_pm_resume(struct device *dev) | |||
1059 | return i915_drm_resume(drm_dev); | 1110 | return i915_drm_resume(drm_dev); |
1060 | } | 1111 | } |
1061 | 1112 | ||
1062 | static int hsw_suspend_complete(struct drm_i915_private *dev_priv) | ||
1063 | { | ||
1064 | hsw_enable_pc8(dev_priv); | ||
1065 | |||
1066 | return 0; | ||
1067 | } | ||
1068 | |||
1069 | static int bxt_suspend_complete(struct drm_i915_private *dev_priv) | ||
1070 | { | ||
1071 | struct drm_device *dev = dev_priv->dev; | ||
1072 | |||
1073 | /* TODO: when DC5 support is added disable DC5 here. */ | ||
1074 | |||
1075 | broxton_ddi_phy_uninit(dev); | ||
1076 | broxton_uninit_cdclk(dev); | ||
1077 | bxt_enable_dc9(dev_priv); | ||
1078 | |||
1079 | return 0; | ||
1080 | } | ||
1081 | |||
1082 | static int bxt_resume_prepare(struct drm_i915_private *dev_priv) | ||
1083 | { | ||
1084 | struct drm_device *dev = dev_priv->dev; | ||
1085 | |||
1086 | /* TODO: when CSR FW support is added make sure the FW is loaded */ | ||
1087 | |||
1088 | bxt_disable_dc9(dev_priv); | ||
1089 | |||
1090 | /* | ||
1091 | * TODO: when DC5 support is added enable DC5 here if the CSR FW | ||
1092 | * is available. | ||
1093 | */ | ||
1094 | broxton_init_cdclk(dev); | ||
1095 | broxton_ddi_phy_init(dev); | ||
1096 | |||
1097 | return 0; | ||
1098 | } | ||
1099 | |||
1100 | /* | 1113 | /* |
1101 | * Save all Gunit registers that may be lost after a D3 and a subsequent | 1114 | * Save all Gunit registers that may be lost after a D3 and a subsequent |
1102 | * S0i[R123] transition. The list of registers needing a save/restore is | 1115 | * S0i[R123] transition. The list of registers needing a save/restore is |
@@ -1502,7 +1515,16 @@ static int intel_runtime_suspend(struct device *device) | |||
1502 | intel_suspend_gt_powersave(dev); | 1515 | intel_suspend_gt_powersave(dev); |
1503 | intel_runtime_pm_disable_interrupts(dev_priv); | 1516 | intel_runtime_pm_disable_interrupts(dev_priv); |
1504 | 1517 | ||
1505 | ret = intel_suspend_complete(dev_priv); | 1518 | ret = 0; |
1519 | if (IS_BROXTON(dev_priv)) { | ||
1520 | bxt_display_core_uninit(dev_priv); | ||
1521 | bxt_enable_dc9(dev_priv); | ||
1522 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | ||
1523 | hsw_enable_pc8(dev_priv); | ||
1524 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | ||
1525 | ret = vlv_suspend_complete(dev_priv); | ||
1526 | } | ||
1527 | |||
1506 | if (ret) { | 1528 | if (ret) { |
1507 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); | 1529 | DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); |
1508 | intel_runtime_pm_enable_interrupts(dev_priv); | 1530 | intel_runtime_pm_enable_interrupts(dev_priv); |
@@ -1576,12 +1598,17 @@ static int intel_runtime_resume(struct device *device) | |||
1576 | if (IS_GEN6(dev_priv)) | 1598 | if (IS_GEN6(dev_priv)) |
1577 | intel_init_pch_refclk(dev); | 1599 | intel_init_pch_refclk(dev); |
1578 | 1600 | ||
1579 | if (IS_BROXTON(dev)) | 1601 | if (IS_BROXTON(dev)) { |
1580 | ret = bxt_resume_prepare(dev_priv); | 1602 | bxt_disable_dc9(dev_priv); |
1581 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 1603 | bxt_display_core_init(dev_priv, true); |
1604 | if (dev_priv->csr.dmc_payload && | ||
1605 | (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) | ||
1606 | gen9_enable_dc5(dev_priv); | ||
1607 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | ||
1582 | hsw_disable_pc8(dev_priv); | 1608 | hsw_disable_pc8(dev_priv); |
1583 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 1609 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1584 | ret = vlv_resume_prepare(dev_priv, true); | 1610 | ret = vlv_resume_prepare(dev_priv, true); |
1611 | } | ||
1585 | 1612 | ||
1586 | /* | 1613 | /* |
1587 | * No point of rolling back things in case of an error, as the best | 1614 | * No point of rolling back things in case of an error, as the best |
@@ -1612,26 +1639,6 @@ static int intel_runtime_resume(struct device *device) | |||
1612 | return ret; | 1639 | return ret; |
1613 | } | 1640 | } |
1614 | 1641 | ||
1615 | /* | ||
1616 | * This function implements common functionality of runtime and system | ||
1617 | * suspend sequence. | ||
1618 | */ | ||
1619 | static int intel_suspend_complete(struct drm_i915_private *dev_priv) | ||
1620 | { | ||
1621 | int ret; | ||
1622 | |||
1623 | if (IS_BROXTON(dev_priv)) | ||
1624 | ret = bxt_suspend_complete(dev_priv); | ||
1625 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | ||
1626 | ret = hsw_suspend_complete(dev_priv); | ||
1627 | else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
1628 | ret = vlv_suspend_complete(dev_priv); | ||
1629 | else | ||
1630 | ret = 0; | ||
1631 | |||
1632 | return ret; | ||
1633 | } | ||
1634 | |||
1635 | static const struct dev_pm_ops i915_pm_ops = { | 1642 | static const struct dev_pm_ops i915_pm_ops = { |
1636 | /* | 1643 | /* |
1637 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, | 1644 | * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, |
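
Note: with intel_suspend_complete() removed, both the system and runtime suspend paths open-code the per-platform dispatch, and only the VLV/CHV branch can fail, hence the ret = 0 default. A compressed sketch of that control flow with a made-up platform enum:

#include <stdio.h>

/* Made-up platform enum; the driver tests IS_BROXTON()/IS_HASWELL()/... */
enum platform { PLAT_BROXTON, PLAT_HASWELL_BDW, PLAT_VLV_CHV, PLAT_OTHER };

static void bxt_suspend(void)          { puts("bxt: uninit display core, enter DC9"); }
static void hsw_suspend(void)          { puts("hsw/bdw: enable PC8"); }
static int  vlv_suspend_complete(void) { puts("vlv/chv: save Gunit state"); return 0; }

static int runtime_suspend(enum platform p)
{
	int ret = 0;    /* only the VLV/CHV branch can fail */

	if (p == PLAT_BROXTON)
		bxt_suspend();
	else if (p == PLAT_HASWELL_BDW)
		hsw_suspend();
	else if (p == PLAT_VLV_CHV)
		ret = vlv_suspend_complete();

	if (ret)
		fprintf(stderr, "runtime suspend failed (%d)\n", ret);
	return ret;
}

int main(void)
{
	return runtime_suspend(PLAT_VLV_CHV);
}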
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a9c8211c8e5e..9d7b54ea14f9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -33,34 +33,40 @@ | |||
33 | #include <uapi/drm/i915_drm.h> | 33 | #include <uapi/drm/i915_drm.h> |
34 | #include <uapi/drm/drm_fourcc.h> | 34 | #include <uapi/drm/drm_fourcc.h> |
35 | 35 | ||
36 | #include <drm/drmP.h> | ||
37 | #include "i915_params.h" | ||
38 | #include "i915_reg.h" | ||
39 | #include "intel_bios.h" | ||
40 | #include "intel_ringbuffer.h" | ||
41 | #include "intel_lrc.h" | ||
42 | #include "i915_gem_gtt.h" | ||
43 | #include "i915_gem_render_state.h" | ||
44 | #include <linux/io-mapping.h> | 36 | #include <linux/io-mapping.h> |
45 | #include <linux/i2c.h> | 37 | #include <linux/i2c.h> |
46 | #include <linux/i2c-algo-bit.h> | 38 | #include <linux/i2c-algo-bit.h> |
47 | #include <drm/intel-gtt.h> | ||
48 | #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ | ||
49 | #include <drm/drm_gem.h> | ||
50 | #include <linux/backlight.h> | 39 | #include <linux/backlight.h> |
51 | #include <linux/hashtable.h> | 40 | #include <linux/hashtable.h> |
52 | #include <linux/intel-iommu.h> | 41 | #include <linux/intel-iommu.h> |
53 | #include <linux/kref.h> | 42 | #include <linux/kref.h> |
54 | #include <linux/pm_qos.h> | 43 | #include <linux/pm_qos.h> |
55 | #include "intel_guc.h" | 44 | #include <linux/shmem_fs.h> |
45 | |||
46 | #include <drm/drmP.h> | ||
47 | #include <drm/intel-gtt.h> | ||
48 | #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ | ||
49 | #include <drm/drm_gem.h> | ||
50 | |||
51 | #include "i915_params.h" | ||
52 | #include "i915_reg.h" | ||
53 | |||
54 | #include "intel_bios.h" | ||
56 | #include "intel_dpll_mgr.h" | 55 | #include "intel_dpll_mgr.h" |
56 | #include "intel_guc.h" | ||
57 | #include "intel_lrc.h" | ||
58 | #include "intel_ringbuffer.h" | ||
59 | |||
60 | #include "i915_gem.h" | ||
61 | #include "i915_gem_gtt.h" | ||
62 | #include "i915_gem_render_state.h" | ||
57 | 63 | ||
58 | /* General customization: | 64 | /* General customization: |
59 | */ | 65 | */ |
60 | 66 | ||
61 | #define DRIVER_NAME "i915" | 67 | #define DRIVER_NAME "i915" |
62 | #define DRIVER_DESC "Intel Graphics" | 68 | #define DRIVER_DESC "Intel Graphics" |
63 | #define DRIVER_DATE "20160411" | 69 | #define DRIVER_DATE "20160425" |
64 | 70 | ||
65 | #undef WARN_ON | 71 | #undef WARN_ON |
66 | /* Many gcc seem to no see through this and fall over :( */ | 72 | /* Many gcc seem to no see through this and fall over :( */ |
@@ -634,6 +640,13 @@ enum forcewake_domains { | |||
634 | FORCEWAKE_MEDIA) | 640 | FORCEWAKE_MEDIA) |
635 | }; | 641 | }; |
636 | 642 | ||
643 | #define FW_REG_READ (1) | ||
644 | #define FW_REG_WRITE (2) | ||
645 | |||
646 | enum forcewake_domains | ||
647 | intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, | ||
648 | i915_reg_t reg, unsigned int op); | ||
649 | |||
637 | struct intel_uncore_funcs { | 650 | struct intel_uncore_funcs { |
638 | void (*force_wake_get)(struct drm_i915_private *dev_priv, | 651 | void (*force_wake_get)(struct drm_i915_private *dev_priv, |
639 | enum forcewake_domains domains); | 652 | enum forcewake_domains domains); |
@@ -666,8 +679,9 @@ struct intel_uncore { | |||
666 | struct intel_uncore_forcewake_domain { | 679 | struct intel_uncore_forcewake_domain { |
667 | struct drm_i915_private *i915; | 680 | struct drm_i915_private *i915; |
668 | enum forcewake_domain_id id; | 681 | enum forcewake_domain_id id; |
682 | enum forcewake_domains mask; | ||
669 | unsigned wake_count; | 683 | unsigned wake_count; |
670 | struct timer_list timer; | 684 | struct hrtimer timer; |
671 | i915_reg_t reg_set; | 685 | i915_reg_t reg_set; |
672 | u32 val_set; | 686 | u32 val_set; |
673 | u32 val_clear; | 687 | u32 val_clear; |
@@ -680,14 +694,14 @@ struct intel_uncore { | |||
680 | }; | 694 | }; |
681 | 695 | ||
682 | /* Iterate over initialised fw domains */ | 696 | /* Iterate over initialised fw domains */ |
683 | #define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \ | 697 | #define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \ |
684 | for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ | 698 | for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ |
685 | (i__) < FW_DOMAIN_ID_COUNT; \ | 699 | (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \ |
686 | (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \ | 700 | (domain__)++) \ |
687 | for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__))) | 701 | for_each_if ((mask__) & (domain__)->mask) |
688 | 702 | ||
689 | #define for_each_fw_domain(domain__, dev_priv__, i__) \ | 703 | #define for_each_fw_domain(domain__, dev_priv__) \ |
690 | for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) | 704 | for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__) |
691 | 705 | ||
692 | #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) | 706 | #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) |
693 | #define CSR_VERSION_MAJOR(version) ((version) >> 16) | 707 | #define CSR_VERSION_MAJOR(version) ((version) >> 16) |
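
Note: the reworked iterators walk the fw_domain array directly and test each domain's new mask field, so callers no longer carry an index variable. A standalone sketch of mask-filtered iteration over a fixed array (names are illustrative; the kernel additionally wraps the condition in for_each_if() to avoid dangling-else pitfalls):

#include <stdio.h>

enum {
	DOM_RENDER  = 1u << 0,
	DOM_BLITTER = 1u << 1,
	DOM_MEDIA   = 1u << 2,
	DOM_COUNT   = 3,
};

struct fw_domain {
	const char *name;
	unsigned int mask;          /* each domain now knows its own bit */
	unsigned int wake_count;
};

static struct fw_domain fw_domain[DOM_COUNT] = {
	{ "render",  DOM_RENDER,  2 },
	{ "blitter", DOM_BLITTER, 0 },
	{ "media",   DOM_MEDIA,   1 },
};

/* Walk the array directly and filter on the per-domain mask; no separate
 * index variable, mirroring for_each_fw_domain_masked(). */
#define for_each_domain_masked(d__, mask__) \
	for ((d__) = &fw_domain[0]; (d__) < &fw_domain[DOM_COUNT]; (d__)++) \
		if ((mask__) & (d__)->mask)

int main(void)
{
	struct fw_domain *d;

	for_each_domain_masked(d, DOM_RENDER | DOM_MEDIA)
		printf("%s.wake_count = %u\n", d->name, d->wake_count);
	return 0;
}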
@@ -996,6 +1010,7 @@ struct intel_fbc_work; | |||
996 | 1010 | ||
997 | struct intel_gmbus { | 1011 | struct intel_gmbus { |
998 | struct i2c_adapter adapter; | 1012 | struct i2c_adapter adapter; |
1013 | #define GMBUS_FORCE_BIT_RETRY (1U << 31) | ||
999 | u32 force_bit; | 1014 | u32 force_bit; |
1000 | u32 reg0; | 1015 | u32 reg0; |
1001 | i915_reg_t gpio_reg; | 1016 | i915_reg_t gpio_reg; |
@@ -1385,9 +1400,6 @@ struct i915_gpu_error { | |||
1385 | 1400 | ||
1386 | /* For missed irq/seqno simulation. */ | 1401 | /* For missed irq/seqno simulation. */ |
1387 | unsigned int test_irq_rings; | 1402 | unsigned int test_irq_rings; |
1388 | |||
1389 | /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ | ||
1390 | bool reload_in_reset; | ||
1391 | }; | 1403 | }; |
1392 | 1404 | ||
1393 | enum modeset_restore { | 1405 | enum modeset_restore { |
@@ -1444,6 +1456,7 @@ struct intel_vbt_data { | |||
1444 | unsigned int lvds_use_ssc:1; | 1456 | unsigned int lvds_use_ssc:1; |
1445 | unsigned int display_clock_mode:1; | 1457 | unsigned int display_clock_mode:1; |
1446 | unsigned int fdi_rx_polarity_inverted:1; | 1458 | unsigned int fdi_rx_polarity_inverted:1; |
1459 | unsigned int panel_type:4; | ||
1447 | int lvds_ssc_freq; | 1460 | int lvds_ssc_freq; |
1448 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ | 1461 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
1449 | 1462 | ||
@@ -1863,7 +1876,7 @@ struct drm_i915_private { | |||
1863 | struct intel_l3_parity l3_parity; | 1876 | struct intel_l3_parity l3_parity; |
1864 | 1877 | ||
1865 | /* Cannot be determined by PCIID. You must always read a register. */ | 1878 | /* Cannot be determined by PCIID. You must always read a register. */ |
1866 | size_t ellc_size; | 1879 | u32 edram_cap; |
1867 | 1880 | ||
1868 | /* gen6+ rps state */ | 1881 | /* gen6+ rps state */ |
1869 | struct intel_gen6_power_mgmt rps; | 1882 | struct intel_gen6_power_mgmt rps; |
@@ -1911,6 +1924,7 @@ struct drm_i915_private { | |||
1911 | * crappiness (can't read out DPLL_MD for pipes B & C). | 1924 | * crappiness (can't read out DPLL_MD for pipes B & C). |
1912 | */ | 1925 | */ |
1913 | u32 chv_dpll_md[I915_MAX_PIPES]; | 1926 | u32 chv_dpll_md[I915_MAX_PIPES]; |
1927 | u32 bxt_phy_grc; | ||
1914 | 1928 | ||
1915 | u32 suspend_count; | 1929 | u32 suspend_count; |
1916 | bool suspended_to_idle; | 1930 | bool suspended_to_idle; |
@@ -2237,6 +2251,7 @@ struct drm_i915_gem_request { | |||
2237 | /** On Which ring this request was generated */ | 2251 | /** On Which ring this request was generated */ |
2238 | struct drm_i915_private *i915; | 2252 | struct drm_i915_private *i915; |
2239 | struct intel_engine_cs *engine; | 2253 | struct intel_engine_cs *engine; |
2254 | unsigned reset_counter; | ||
2240 | 2255 | ||
2241 | /** GEM sequence number associated with the previous request, | 2256 | /** GEM sequence number associated with the previous request, |
2242 | * when the HWS breadcrumb is equal to this the GPU is processing | 2257 | * when the HWS breadcrumb is equal to this the GPU is processing |
@@ -2317,7 +2332,6 @@ struct drm_i915_gem_request { | |||
2317 | struct drm_i915_gem_request * __must_check | 2332 | struct drm_i915_gem_request * __must_check |
2318 | i915_gem_request_alloc(struct intel_engine_cs *engine, | 2333 | i915_gem_request_alloc(struct intel_engine_cs *engine, |
2319 | struct intel_context *ctx); | 2334 | struct intel_context *ctx); |
2320 | void i915_gem_request_cancel(struct drm_i915_gem_request *req); | ||
2321 | void i915_gem_request_free(struct kref *req_ref); | 2335 | void i915_gem_request_free(struct kref *req_ref); |
2322 | int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, | 2336 | int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, |
2323 | struct drm_file *file); | 2337 | struct drm_file *file); |
@@ -2487,6 +2501,7 @@ struct drm_i915_cmd_table { | |||
2487 | __p; \ | 2501 | __p; \ |
2488 | }) | 2502 | }) |
2489 | #define INTEL_INFO(p) (&__I915__(p)->info) | 2503 | #define INTEL_INFO(p) (&__I915__(p)->info) |
2504 | #define INTEL_GEN(p) (INTEL_INFO(p)->gen) | ||
2490 | #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) | 2505 | #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) |
2491 | #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) | 2506 | #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) |
2492 | 2507 | ||
@@ -2613,8 +2628,9 @@ struct drm_i915_cmd_table { | |||
2613 | #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) | 2628 | #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) |
2614 | #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) | 2629 | #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) |
2615 | #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) | 2630 | #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) |
2631 | #define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED) | ||
2616 | #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ | 2632 | #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ |
2617 | __I915__(dev)->ellc_size) | 2633 | HAS_EDRAM(dev)) |
2618 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | 2634 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
2619 | 2635 | ||
2620 | #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) | 2636 | #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) |
@@ -2631,8 +2647,9 @@ struct drm_i915_cmd_table { | |||
2631 | 2647 | ||
2632 | /* WaRsDisableCoarsePowerGating:skl,bxt */ | 2648 | /* WaRsDisableCoarsePowerGating:skl,bxt */ |
2633 | #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ | 2649 | #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ |
2634 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ | 2650 | IS_SKL_GT3(dev) || \ |
2635 | IS_SKL_REVID(dev, 0, SKL_REVID_F0))) | 2651 | IS_SKL_GT4(dev)) |
2652 | |||
2636 | /* | 2653 | /* |
2637 | * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts | 2654 | * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts |
2638 | * even when in MSI mode. This results in spurious interrupt warnings if the | 2655 | * even when in MSI mode. This results in spurious interrupt warnings if the |
@@ -2667,7 +2684,7 @@ struct drm_i915_cmd_table { | |||
2667 | #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ | 2684 | #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ |
2668 | IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ | 2685 | IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ |
2669 | IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ | 2686 | IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ |
2670 | IS_KABYLAKE(dev)) | 2687 | IS_KABYLAKE(dev) || IS_BROXTON(dev)) |
2671 | #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) | 2688 | #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) |
2672 | #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) | 2689 | #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) |
2673 | 2690 | ||
@@ -2791,6 +2808,8 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, | |||
2791 | enum forcewake_domains domains); | 2808 | enum forcewake_domains domains); |
2792 | void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, | 2809 | void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, |
2793 | enum forcewake_domains domains); | 2810 | enum forcewake_domains domains); |
2811 | u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); | ||
2812 | |||
2794 | void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); | 2813 | void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); |
2795 | static inline bool intel_vgpu_active(struct drm_device *dev) | 2814 | static inline bool intel_vgpu_active(struct drm_device *dev) |
2796 | { | 2815 | { |
@@ -2869,7 +2888,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
2869 | struct drm_file *file_priv); | 2888 | struct drm_file *file_priv); |
2870 | void i915_gem_execbuffer_move_to_active(struct list_head *vmas, | 2889 | void i915_gem_execbuffer_move_to_active(struct list_head *vmas, |
2871 | struct drm_i915_gem_request *req); | 2890 | struct drm_i915_gem_request *req); |
2872 | void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params); | ||
2873 | int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, | 2891 | int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, |
2874 | struct drm_i915_gem_execbuffer2 *args, | 2892 | struct drm_i915_gem_execbuffer2 *args, |
2875 | struct list_head *vmas); | 2893 | struct list_head *vmas); |
@@ -3000,9 +3018,11 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) | |||
3000 | * pages and then returns a contiguous mapping of the backing storage into | 3018 | * pages and then returns a contiguous mapping of the backing storage into |
3001 | * the kernel address space. | 3019 | * the kernel address space. |
3002 | * | 3020 | * |
3003 | * The caller must hold the struct_mutex. | 3021 | * The caller must hold the struct_mutex, and is responsible for calling |
3022 | * i915_gem_object_unpin_map() when the mapping is no longer required. | ||
3004 | * | 3023 | * |
3005 | * Returns the pointer through which to access the backing storage. | 3024 | * Returns the pointer through which to access the mapped object, or an |
3025 | * ERR_PTR() on error. | ||
3006 | */ | 3026 | */ |
3007 | void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj); | 3027 | void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj); |
3008 | 3028 | ||
@@ -3069,23 +3089,45 @@ i915_gem_find_active_request(struct intel_engine_cs *engine); | |||
3069 | 3089 | ||
3070 | bool i915_gem_retire_requests(struct drm_device *dev); | 3090 | bool i915_gem_retire_requests(struct drm_device *dev); |
3071 | void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); | 3091 | void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); |
3072 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, | 3092 | |
3073 | bool interruptible); | 3093 | static inline u32 i915_reset_counter(struct i915_gpu_error *error) |
3094 | { | ||
3095 | return atomic_read(&error->reset_counter); | ||
3096 | } | ||
3097 | |||
3098 | static inline bool __i915_reset_in_progress(u32 reset) | ||
3099 | { | ||
3100 | return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG); | ||
3101 | } | ||
3102 | |||
3103 | static inline bool __i915_reset_in_progress_or_wedged(u32 reset) | ||
3104 | { | ||
3105 | return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); | ||
3106 | } | ||
3107 | |||
3108 | static inline bool __i915_terminally_wedged(u32 reset) | ||
3109 | { | ||
3110 | return unlikely(reset & I915_WEDGED); | ||
3111 | } | ||
3074 | 3112 | ||
3075 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) | 3113 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
3076 | { | 3114 | { |
3077 | return unlikely(atomic_read(&error->reset_counter) | 3115 | return __i915_reset_in_progress(i915_reset_counter(error)); |
3078 | & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); | 3116 | } |
3117 | |||
3118 | static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) | ||
3119 | { | ||
3120 | return __i915_reset_in_progress_or_wedged(i915_reset_counter(error)); | ||
3079 | } | 3121 | } |
3080 | 3122 | ||
3081 | static inline bool i915_terminally_wedged(struct i915_gpu_error *error) | 3123 | static inline bool i915_terminally_wedged(struct i915_gpu_error *error) |
3082 | { | 3124 | { |
3083 | return atomic_read(&error->reset_counter) & I915_WEDGED; | 3125 | return __i915_terminally_wedged(i915_reset_counter(error)); |
3084 | } | 3126 | } |
3085 | 3127 | ||
3086 | static inline u32 i915_reset_count(struct i915_gpu_error *error) | 3128 | static inline u32 i915_reset_count(struct i915_gpu_error *error) |
3087 | { | 3129 | { |
3088 | return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; | 3130 | return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2; |
3089 | } | 3131 | } |
3090 | 3132 | ||
3091 | static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) | 3133 | static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) |
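
Note: these helpers all decode one atomic reset_counter: the low bit marks a reset in progress, a high bit marks a terminally wedged GPU, and the remaining bits advance once per completed reset. A userspace sketch of that encoding with C11 atomics; the flag values here are illustrative rather than the driver's exact constants.

#include <stdio.h>
#include <stdatomic.h>

/* Illustrative bit layout (the driver's constants live in i915_drv.h):
 * bit 0  set -> reset in progress (counter is odd)
 * bit 31 set -> GPU terminally wedged
 * remaining bits advance once per completed reset. */
#define RESET_IN_PROGRESS_FLAG  1u
#define WEDGED                  (1u << 31)

static atomic_uint reset_counter;

static unsigned int reset_read(void)            { return atomic_load(&reset_counter); }
static int reset_in_progress(unsigned int v)    { return !!(v & RESET_IN_PROGRESS_FLAG); }
static int terminally_wedged(unsigned int v)    { return !!(v & WEDGED); }
static unsigned int reset_count(unsigned int v) { return ((v & ~WEDGED) + 1) / 2; }

static void report(const char *when)
{
	unsigned int v = reset_read();

	printf("%s: in_progress=%d wedged=%d count=%u\n", when,
	       reset_in_progress(v), terminally_wedged(v), reset_count(v));
}

int main(void)
{
	/* Error handler: clear a previous wedge, bump the counter to odd. */
	atomic_fetch_and(&reset_counter, ~WEDGED);
	atomic_fetch_add(&reset_counter, 1);
	report("reset requested");

	/* Successful reset: bump again so the counter becomes even. */
	atomic_fetch_add(&reset_counter, 1);
	report("reset complete");

	/* Failed reset: mark the GPU wedged instead. */
	atomic_fetch_or(&reset_counter, WEDGED);
	report("reset failed");
	return 0;
}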
@@ -3118,7 +3160,6 @@ void __i915_add_request(struct drm_i915_gem_request *req, | |||
3118 | #define i915_add_request_no_flush(req) \ | 3160 | #define i915_add_request_no_flush(req) \ |
3119 | __i915_add_request(req, NULL, false) | 3161 | __i915_add_request(req, NULL, false) |
3120 | int __i915_wait_request(struct drm_i915_gem_request *req, | 3162 | int __i915_wait_request(struct drm_i915_gem_request *req, |
3121 | unsigned reset_counter, | ||
3122 | bool interruptible, | 3163 | bool interruptible, |
3123 | s64 *timeout, | 3164 | s64 *timeout, |
3124 | struct intel_rps_client *rps); | 3165 | struct intel_rps_client *rps); |
@@ -3455,6 +3496,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, | |||
3455 | bool enable); | 3496 | bool enable); |
3456 | extern int intel_opregion_notify_adapter(struct drm_device *dev, | 3497 | extern int intel_opregion_notify_adapter(struct drm_device *dev, |
3457 | pci_power_t state); | 3498 | pci_power_t state); |
3499 | extern int intel_opregion_get_panel_type(struct drm_device *dev); | ||
3458 | #else | 3500 | #else |
3459 | static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } | 3501 | static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } |
3460 | static inline void intel_opregion_init(struct drm_device *dev) { return; } | 3502 | static inline void intel_opregion_init(struct drm_device *dev) { return; } |
@@ -3470,6 +3512,10 @@ intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) | |||
3470 | { | 3512 | { |
3471 | return 0; | 3513 | return 0; |
3472 | } | 3514 | } |
3515 | static inline int intel_opregion_get_panel_type(struct drm_device *dev) | ||
3516 | { | ||
3517 | return -ENODEV; | ||
3518 | } | ||
3473 | #endif | 3519 | #endif |
3474 | 3520 | ||
3475 | /* intel_acpi.c */ | 3521 | /* intel_acpi.c */ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f4abf3abd572..261a3ef72828 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -32,14 +32,13 @@ | |||
32 | #include "i915_vgpu.h" | 32 | #include "i915_vgpu.h" |
33 | #include "i915_trace.h" | 33 | #include "i915_trace.h" |
34 | #include "intel_drv.h" | 34 | #include "intel_drv.h" |
35 | #include "intel_mocs.h" | ||
35 | #include <linux/shmem_fs.h> | 36 | #include <linux/shmem_fs.h> |
36 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
37 | #include <linux/swap.h> | 38 | #include <linux/swap.h> |
38 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
39 | #include <linux/dma-buf.h> | 40 | #include <linux/dma-buf.h> |
40 | 41 | ||
41 | #define RQ_BUG_ON(expr) | ||
42 | |||
43 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); | 42 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
44 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); | 43 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); |
45 | static void | 44 | static void |
@@ -85,9 +84,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) | |||
85 | { | 84 | { |
86 | int ret; | 85 | int ret; |
87 | 86 | ||
88 | #define EXIT_COND (!i915_reset_in_progress(error) || \ | 87 | if (!i915_reset_in_progress(error)) |
89 | i915_terminally_wedged(error)) | ||
90 | if (EXIT_COND) | ||
91 | return 0; | 88 | return 0; |
92 | 89 | ||
93 | /* | 90 | /* |
@@ -96,17 +93,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) | |||
96 | * we should simply try to bail out and fail as gracefully as possible. | 93 | * we should simply try to bail out and fail as gracefully as possible. |
97 | */ | 94 | */ |
98 | ret = wait_event_interruptible_timeout(error->reset_queue, | 95 | ret = wait_event_interruptible_timeout(error->reset_queue, |
99 | EXIT_COND, | 96 | !i915_reset_in_progress(error), |
100 | 10*HZ); | 97 | 10*HZ); |
101 | if (ret == 0) { | 98 | if (ret == 0) { |
102 | DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); | 99 | DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); |
103 | return -EIO; | 100 | return -EIO; |
104 | } else if (ret < 0) { | 101 | } else if (ret < 0) { |
105 | return ret; | 102 | return ret; |
103 | } else { | ||
104 | return 0; | ||
106 | } | 105 | } |
107 | #undef EXIT_COND | ||
108 | |||
109 | return 0; | ||
110 | } | 106 | } |
111 | 107 | ||
112 | int i915_mutex_lock_interruptible(struct drm_device *dev) | 108 | int i915_mutex_lock_interruptible(struct drm_device *dev) |
@@ -211,11 +207,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) | |||
211 | BUG_ON(obj->madv == __I915_MADV_PURGED); | 207 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
212 | 208 | ||
213 | ret = i915_gem_object_set_to_cpu_domain(obj, true); | 209 | ret = i915_gem_object_set_to_cpu_domain(obj, true); |
214 | if (ret) { | 210 | if (WARN_ON(ret)) { |
215 | /* In the event of a disaster, abandon all caches and | 211 | /* In the event of a disaster, abandon all caches and |
216 | * hope for the best. | 212 | * hope for the best. |
217 | */ | 213 | */ |
218 | WARN_ON(ret != -EIO); | ||
219 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 214 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
220 | } | 215 | } |
221 | 216 | ||
@@ -1110,27 +1105,19 @@ put_rpm: | |||
1110 | return ret; | 1105 | return ret; |
1111 | } | 1106 | } |
1112 | 1107 | ||
1113 | int | 1108 | static int |
1114 | i915_gem_check_wedge(struct i915_gpu_error *error, | 1109 | i915_gem_check_wedge(unsigned reset_counter, bool interruptible) |
1115 | bool interruptible) | ||
1116 | { | 1110 | { |
1117 | if (i915_reset_in_progress(error)) { | 1111 | if (__i915_terminally_wedged(reset_counter)) |
1112 | return -EIO; | ||
1113 | |||
1114 | if (__i915_reset_in_progress(reset_counter)) { | ||
1118 | /* Non-interruptible callers can't handle -EAGAIN, hence return | 1115 | /* Non-interruptible callers can't handle -EAGAIN, hence return |
1119 | * -EIO unconditionally for these. */ | 1116 | * -EIO unconditionally for these. */ |
1120 | if (!interruptible) | 1117 | if (!interruptible) |
1121 | return -EIO; | 1118 | return -EIO; |
1122 | 1119 | ||
1123 | /* Recovery complete, but the reset failed ... */ | 1120 | return -EAGAIN; |
1124 | if (i915_terminally_wedged(error)) | ||
1125 | return -EIO; | ||
1126 | |||
1127 | /* | ||
1128 | * Check if GPU Reset is in progress - we need intel_ring_begin | ||
1129 | * to work properly to reinit the hw state while the gpu is | ||
1130 | * still marked as reset-in-progress. Handle this with a flag. | ||
1131 | */ | ||
1132 | if (!error->reload_in_reset) | ||
1133 | return -EAGAIN; | ||
1134 | } | 1121 | } |
1135 | 1122 | ||
1136 | return 0; | 1123 | return 0; |
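With reload_in_reset gone, the contract of i915_gem_check_wedge() above is simply: -EIO once the GPU is terminally wedged (or when a reset is in flight and the caller cannot cope with -EAGAIN), -EAGAIN otherwise, and 0 when the GPU is healthy. A hedged sketch of how an interruptible caller is expected to honour -EAGAIN; sketch_do_gpu_work() and sketch_submit_request() are illustrative names, not driver entry points:

/* Drop struct_mutex on -EAGAIN, let the reset handler run, then retry. */
static int sketch_do_gpu_work(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	for (;;) {
		ret = i915_mutex_lock_interruptible(dev);
		if (ret)
			return ret;

		ret = sketch_submit_request(dev);	/* may see -EAGAIN */
		mutex_unlock(&dev->struct_mutex);

		if (ret != -EAGAIN)
			return ret;		/* 0, -EIO, -EINTR, ... */

		/* reset in progress: sleep on it instead of spinning,
		 * the same wait i915_gem_wait_for_error() performs above */
		ret = wait_event_interruptible(dev_priv->gpu_error.reset_queue,
					       !i915_reset_in_progress(&dev_priv->gpu_error));
		if (ret)
			return ret;
	}
}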
@@ -1224,7 +1211,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) | |||
1224 | /** | 1211 | /** |
1225 | * __i915_wait_request - wait until execution of request has finished | 1212 | * __i915_wait_request - wait until execution of request has finished |
1226 | * @req: duh! | 1213 | * @req: duh! |
1227 | * @reset_counter: reset sequence associated with the given request | ||
1228 | * @interruptible: do an interruptible wait (normally yes) | 1214 | * @interruptible: do an interruptible wait (normally yes) |
1229 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining | 1215 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining |
1230 | * | 1216 | * |
@@ -1239,7 +1225,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) | |||
1239 | * errno with remaining time filled in timeout argument. | 1225 | * errno with remaining time filled in timeout argument. |
1240 | */ | 1226 | */ |
1241 | int __i915_wait_request(struct drm_i915_gem_request *req, | 1227 | int __i915_wait_request(struct drm_i915_gem_request *req, |
1242 | unsigned reset_counter, | ||
1243 | bool interruptible, | 1228 | bool interruptible, |
1244 | s64 *timeout, | 1229 | s64 *timeout, |
1245 | struct intel_rps_client *rps) | 1230 | struct intel_rps_client *rps) |
@@ -1300,13 +1285,14 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1300 | prepare_to_wait(&engine->irq_queue, &wait, state); | 1285 | prepare_to_wait(&engine->irq_queue, &wait, state); |
1301 | 1286 | ||
1302 | /* We need to check whether any gpu reset happened in between | 1287 | /* We need to check whether any gpu reset happened in between |
1303 | * the caller grabbing the seqno and now ... */ | 1288 | * the request being submitted and now. If a reset has occurred, |
1304 | if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) { | 1289 | * the request is effectively complete (we either are in the |
1305 | /* ... but upgrade the -EAGAIN to an -EIO if the gpu | 1290 | * process of or have discarded the rendering and completely |
1306 | * is truely gone. */ | 1291 | * reset the GPU. The results of the request are lost and we |
1307 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); | 1292 | * are free to continue on with the original operation. |
1308 | if (ret == 0) | 1293 | */ |
1309 | ret = -EAGAIN; | 1294 | if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { |
1295 | ret = 0; | ||
1310 | break; | 1296 | break; |
1311 | } | 1297 | } |
1312 | 1298 | ||
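Because every request now carries the counter value sampled at allocation time (req->reset_counter, stored further down in __i915_gem_request_alloc()), the wait loop only needs the inequality above: the counter is monotonic, so any difference means at least one reset ran in between and the request's results are already gone. A hedged restatement of that test, for illustration only:

#include <linux/types.h>

static bool sketch_wait_is_over(u32 snapshot, u32 now, bool seqno_passed)
{
	/* a changed counter means a reset started and/or completed;
	 * the request was discarded with it, so report the wait as
	 * successfully finished rather than returning an error */
	if (snapshot != now)
		return true;

	return seqno_passed;
}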
@@ -1458,26 +1444,15 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) | |||
1458 | int | 1444 | int |
1459 | i915_wait_request(struct drm_i915_gem_request *req) | 1445 | i915_wait_request(struct drm_i915_gem_request *req) |
1460 | { | 1446 | { |
1461 | struct drm_device *dev; | 1447 | struct drm_i915_private *dev_priv = req->i915; |
1462 | struct drm_i915_private *dev_priv; | ||
1463 | bool interruptible; | 1448 | bool interruptible; |
1464 | int ret; | 1449 | int ret; |
1465 | 1450 | ||
1466 | BUG_ON(req == NULL); | ||
1467 | |||
1468 | dev = req->engine->dev; | ||
1469 | dev_priv = dev->dev_private; | ||
1470 | interruptible = dev_priv->mm.interruptible; | 1451 | interruptible = dev_priv->mm.interruptible; |
1471 | 1452 | ||
1472 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1453 | BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
1473 | |||
1474 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); | ||
1475 | if (ret) | ||
1476 | return ret; | ||
1477 | 1454 | ||
1478 | ret = __i915_wait_request(req, | 1455 | ret = __i915_wait_request(req, interruptible, NULL, NULL); |
1479 | atomic_read(&dev_priv->gpu_error.reset_counter), | ||
1480 | interruptible, NULL, NULL); | ||
1481 | if (ret) | 1456 | if (ret) |
1482 | return ret; | 1457 | return ret; |
1483 | 1458 | ||
@@ -1521,7 +1496,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | |||
1521 | 1496 | ||
1522 | i915_gem_object_retire__read(obj, i); | 1497 | i915_gem_object_retire__read(obj, i); |
1523 | } | 1498 | } |
1524 | RQ_BUG_ON(obj->active); | 1499 | GEM_BUG_ON(obj->active); |
1525 | } | 1500 | } |
1526 | 1501 | ||
1527 | return 0; | 1502 | return 0; |
@@ -1552,7 +1527,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1552 | struct drm_device *dev = obj->base.dev; | 1527 | struct drm_device *dev = obj->base.dev; |
1553 | struct drm_i915_private *dev_priv = dev->dev_private; | 1528 | struct drm_i915_private *dev_priv = dev->dev_private; |
1554 | struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; | 1529 | struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; |
1555 | unsigned reset_counter; | ||
1556 | int ret, i, n = 0; | 1530 | int ret, i, n = 0; |
1557 | 1531 | ||
1558 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1532 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
@@ -1561,12 +1535,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1561 | if (!obj->active) | 1535 | if (!obj->active) |
1562 | return 0; | 1536 | return 0; |
1563 | 1537 | ||
1564 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); | ||
1565 | if (ret) | ||
1566 | return ret; | ||
1567 | |||
1568 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
1569 | |||
1570 | if (readonly) { | 1538 | if (readonly) { |
1571 | struct drm_i915_gem_request *req; | 1539 | struct drm_i915_gem_request *req; |
1572 | 1540 | ||
@@ -1588,9 +1556,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1588 | } | 1556 | } |
1589 | 1557 | ||
1590 | mutex_unlock(&dev->struct_mutex); | 1558 | mutex_unlock(&dev->struct_mutex); |
1559 | ret = 0; | ||
1591 | for (i = 0; ret == 0 && i < n; i++) | 1560 | for (i = 0; ret == 0 && i < n; i++) |
1592 | ret = __i915_wait_request(requests[i], reset_counter, true, | 1561 | ret = __i915_wait_request(requests[i], true, NULL, rps); |
1593 | NULL, rps); | ||
1594 | mutex_lock(&dev->struct_mutex); | 1562 | mutex_lock(&dev->struct_mutex); |
1595 | 1563 | ||
1596 | for (i = 0; i < n; i++) { | 1564 | for (i = 0; i < n; i++) { |
@@ -1964,11 +1932,27 @@ out: | |||
1964 | void | 1932 | void |
1965 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) | 1933 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) |
1966 | { | 1934 | { |
1935 | /* Serialisation between user GTT access and our code depends upon | ||
1936 | * revoking the CPU's PTE whilst the mutex is held. The next user | ||
1937 | * pagefault then has to wait until we release the mutex. | ||
1938 | */ | ||
1939 | lockdep_assert_held(&obj->base.dev->struct_mutex); | ||
1940 | |||
1967 | if (!obj->fault_mappable) | 1941 | if (!obj->fault_mappable) |
1968 | return; | 1942 | return; |
1969 | 1943 | ||
1970 | drm_vma_node_unmap(&obj->base.vma_node, | 1944 | drm_vma_node_unmap(&obj->base.vma_node, |
1971 | obj->base.dev->anon_inode->i_mapping); | 1945 | obj->base.dev->anon_inode->i_mapping); |
1946 | |||
1947 | /* Ensure that the CPU's PTE are revoked and there are not outstanding | ||
1948 | * memory transactions from userspace before we return. The TLB | ||
1949 | * flushing implied above by changing the PTE above *should* be | ||
1950 | * sufficient, an extra barrier here just provides us with a bit | ||
1951 | * of paranoid documentation about our requirement to serialise | ||
1952 | * memory writes before touching registers / GSM. | ||
1953 | */ | ||
1954 | wmb(); | ||
1955 | |||
1972 | obj->fault_mappable = false; | 1956 | obj->fault_mappable = false; |
1973 | } | 1957 | } |
1974 | 1958 | ||
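The two comments above pin down the release_mmap() contract: PTE revocation only works while struct_mutex serialises against fresh page faults, and the wmb() fences any lingering userspace GTT writes before the caller goes on to touch registers or the GSM. A hedged sketch of the calling pattern that contract implies; rebind_in_ggtt() is a made-up stand-in for whatever hardware update follows:

static void sketch_revoke_then_touch_hw(struct drm_i915_gem_object *obj)
{
	/* required: the next pagefault must block until we are done */
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* revokes the CPU PTEs and issues the wmb() documented above */
	i915_gem_release_mmap(obj);

	/* only now is it safe to rewrite the GTT / GSM for this object */
	rebind_in_ggtt(obj);		/* hypothetical follow-up */
}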
@@ -2177,11 +2161,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | |||
2177 | BUG_ON(obj->madv == __I915_MADV_PURGED); | 2161 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
2178 | 2162 | ||
2179 | ret = i915_gem_object_set_to_cpu_domain(obj, true); | 2163 | ret = i915_gem_object_set_to_cpu_domain(obj, true); |
2180 | if (ret) { | 2164 | if (WARN_ON(ret)) { |
2181 | /* In the event of a disaster, abandon all caches and | 2165 | /* In the event of a disaster, abandon all caches and |
2182 | * hope for the best. | 2166 | * hope for the best. |
2183 | */ | 2167 | */ |
2184 | WARN_ON(ret != -EIO); | ||
2185 | i915_gem_clflush_object(obj, true); | 2168 | i915_gem_clflush_object(obj, true); |
2186 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 2169 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
2187 | } | 2170 | } |
@@ -2470,8 +2453,8 @@ void i915_vma_move_to_active(struct i915_vma *vma, | |||
2470 | static void | 2453 | static void |
2471 | i915_gem_object_retire__write(struct drm_i915_gem_object *obj) | 2454 | i915_gem_object_retire__write(struct drm_i915_gem_object *obj) |
2472 | { | 2455 | { |
2473 | RQ_BUG_ON(obj->last_write_req == NULL); | 2456 | GEM_BUG_ON(obj->last_write_req == NULL); |
2474 | RQ_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine))); | 2457 | GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine))); |
2475 | 2458 | ||
2476 | i915_gem_request_assign(&obj->last_write_req, NULL); | 2459 | i915_gem_request_assign(&obj->last_write_req, NULL); |
2477 | intel_fb_obj_flush(obj, true, ORIGIN_CS); | 2460 | intel_fb_obj_flush(obj, true, ORIGIN_CS); |
@@ -2482,8 +2465,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring) | |||
2482 | { | 2465 | { |
2483 | struct i915_vma *vma; | 2466 | struct i915_vma *vma; |
2484 | 2467 | ||
2485 | RQ_BUG_ON(obj->last_read_req[ring] == NULL); | 2468 | GEM_BUG_ON(obj->last_read_req[ring] == NULL); |
2486 | RQ_BUG_ON(!(obj->active & (1 << ring))); | 2469 | GEM_BUG_ON(!(obj->active & (1 << ring))); |
2487 | 2470 | ||
2488 | list_del_init(&obj->engine_list[ring]); | 2471 | list_del_init(&obj->engine_list[ring]); |
2489 | i915_gem_request_assign(&obj->last_read_req[ring], NULL); | 2472 | i915_gem_request_assign(&obj->last_read_req[ring], NULL); |
@@ -2743,6 +2726,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
2743 | struct drm_i915_gem_request **req_out) | 2726 | struct drm_i915_gem_request **req_out) |
2744 | { | 2727 | { |
2745 | struct drm_i915_private *dev_priv = to_i915(engine->dev); | 2728 | struct drm_i915_private *dev_priv = to_i915(engine->dev); |
2729 | unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); | ||
2746 | struct drm_i915_gem_request *req; | 2730 | struct drm_i915_gem_request *req; |
2747 | int ret; | 2731 | int ret; |
2748 | 2732 | ||
@@ -2751,6 +2735,14 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
2751 | 2735 | ||
2752 | *req_out = NULL; | 2736 | *req_out = NULL; |
2753 | 2737 | ||
2738 | /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report | ||
2739 | * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex | ||
2740 | * and restart. | ||
2741 | */ | ||
2742 | ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible); | ||
2743 | if (ret) | ||
2744 | return ret; | ||
2745 | |||
2754 | req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); | 2746 | req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); |
2755 | if (req == NULL) | 2747 | if (req == NULL) |
2756 | return -ENOMEM; | 2748 | return -ENOMEM; |
@@ -2762,6 +2754,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
2762 | kref_init(&req->ref); | 2754 | kref_init(&req->ref); |
2763 | req->i915 = dev_priv; | 2755 | req->i915 = dev_priv; |
2764 | req->engine = engine; | 2756 | req->engine = engine; |
2757 | req->reset_counter = reset_counter; | ||
2765 | req->ctx = ctx; | 2758 | req->ctx = ctx; |
2766 | i915_gem_context_reference(req->ctx); | 2759 | i915_gem_context_reference(req->ctx); |
2767 | 2760 | ||
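Allocation is therefore the single point where the wedge state is reported back to userspace and where the counter snapshot is taken. A hedged sketch of what the checks above amount to (illustrative only; the real code is __i915_gem_request_alloc() calling i915_gem_check_wedge()):

static int sketch_request_alloc_checks(struct drm_i915_private *dev_priv,
				       struct drm_i915_gem_request *req)
{
	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);

	if (__i915_terminally_wedged(reset_counter))
		return -EIO;		/* never accept new work when wedged */

	if (__i915_reset_in_progress(reset_counter))
		return dev_priv->mm.interruptible ? -EAGAIN : -EIO;

	req->reset_counter = reset_counter;	/* snapshot used by later waits */
	return 0;
}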
@@ -2791,7 +2784,8 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
2791 | * fully prepared. Thus it can be cleaned up using the proper | 2784 | * fully prepared. Thus it can be cleaned up using the proper |
2792 | * free code. | 2785 | * free code. |
2793 | */ | 2786 | */ |
2794 | i915_gem_request_cancel(req); | 2787 | intel_ring_reserved_space_cancel(req->ringbuf); |
2788 | i915_gem_request_unreference(req); | ||
2795 | return ret; | 2789 | return ret; |
2796 | } | 2790 | } |
2797 | 2791 | ||
@@ -2828,13 +2822,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, | |||
2828 | return err ? ERR_PTR(err) : req; | 2822 | return err ? ERR_PTR(err) : req; |
2829 | } | 2823 | } |
2830 | 2824 | ||
2831 | void i915_gem_request_cancel(struct drm_i915_gem_request *req) | ||
2832 | { | ||
2833 | intel_ring_reserved_space_cancel(req->ringbuf); | ||
2834 | |||
2835 | i915_gem_request_unreference(req); | ||
2836 | } | ||
2837 | |||
2838 | struct drm_i915_gem_request * | 2825 | struct drm_i915_gem_request * |
2839 | i915_gem_find_active_request(struct intel_engine_cs *engine) | 2826 | i915_gem_find_active_request(struct intel_engine_cs *engine) |
2840 | { | 2827 | { |
@@ -3140,11 +3127,9 @@ retire: | |||
3140 | int | 3127 | int |
3141 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | 3128 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) |
3142 | { | 3129 | { |
3143 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3144 | struct drm_i915_gem_wait *args = data; | 3130 | struct drm_i915_gem_wait *args = data; |
3145 | struct drm_i915_gem_object *obj; | 3131 | struct drm_i915_gem_object *obj; |
3146 | struct drm_i915_gem_request *req[I915_NUM_ENGINES]; | 3132 | struct drm_i915_gem_request *req[I915_NUM_ENGINES]; |
3147 | unsigned reset_counter; | ||
3148 | int i, n = 0; | 3133 | int i, n = 0; |
3149 | int ret; | 3134 | int ret; |
3150 | 3135 | ||
@@ -3178,7 +3163,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
3178 | } | 3163 | } |
3179 | 3164 | ||
3180 | drm_gem_object_unreference(&obj->base); | 3165 | drm_gem_object_unreference(&obj->base); |
3181 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
3182 | 3166 | ||
3183 | for (i = 0; i < I915_NUM_ENGINES; i++) { | 3167 | for (i = 0; i < I915_NUM_ENGINES; i++) { |
3184 | if (obj->last_read_req[i] == NULL) | 3168 | if (obj->last_read_req[i] == NULL) |
@@ -3191,7 +3175,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
3191 | 3175 | ||
3192 | for (i = 0; i < n; i++) { | 3176 | for (i = 0; i < n; i++) { |
3193 | if (ret == 0) | 3177 | if (ret == 0) |
3194 | ret = __i915_wait_request(req[i], reset_counter, true, | 3178 | ret = __i915_wait_request(req[i], true, |
3195 | args->timeout_ns > 0 ? &args->timeout_ns : NULL, | 3179 | args->timeout_ns > 0 ? &args->timeout_ns : NULL, |
3196 | to_rps_client(file)); | 3180 | to_rps_client(file)); |
3197 | i915_gem_request_unreference__unlocked(req[i]); | 3181 | i915_gem_request_unreference__unlocked(req[i]); |
@@ -3223,7 +3207,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, | |||
3223 | if (!i915_semaphore_is_enabled(obj->base.dev)) { | 3207 | if (!i915_semaphore_is_enabled(obj->base.dev)) { |
3224 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 3208 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
3225 | ret = __i915_wait_request(from_req, | 3209 | ret = __i915_wait_request(from_req, |
3226 | atomic_read(&i915->gpu_error.reset_counter), | ||
3227 | i915->mm.interruptible, | 3210 | i915->mm.interruptible, |
3228 | NULL, | 3211 | NULL, |
3229 | &i915->rps.semaphores); | 3212 | &i915->rps.semaphores); |
@@ -3344,9 +3327,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) | |||
3344 | if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) | 3327 | if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) |
3345 | return; | 3328 | return; |
3346 | 3329 | ||
3347 | /* Wait for any direct GTT access to complete */ | ||
3348 | mb(); | ||
3349 | |||
3350 | old_read_domains = obj->base.read_domains; | 3330 | old_read_domains = obj->base.read_domains; |
3351 | old_write_domain = obj->base.write_domain; | 3331 | old_write_domain = obj->base.write_domain; |
3352 | 3332 | ||
@@ -3451,12 +3431,9 @@ int i915_gpu_idle(struct drm_device *dev) | |||
3451 | return PTR_ERR(req); | 3431 | return PTR_ERR(req); |
3452 | 3432 | ||
3453 | ret = i915_switch_context(req); | 3433 | ret = i915_switch_context(req); |
3454 | if (ret) { | ||
3455 | i915_gem_request_cancel(req); | ||
3456 | return ret; | ||
3457 | } | ||
3458 | |||
3459 | i915_add_request_no_flush(req); | 3434 | i915_add_request_no_flush(req); |
3435 | if (ret) | ||
3436 | return ret; | ||
3460 | } | 3437 | } |
3461 | 3438 | ||
3462 | ret = intel_engine_idle(engine); | 3439 | ret = intel_engine_idle(engine); |
@@ -4179,16 +4156,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
4179 | struct drm_i915_file_private *file_priv = file->driver_priv; | 4156 | struct drm_i915_file_private *file_priv = file->driver_priv; |
4180 | unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; | 4157 | unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; |
4181 | struct drm_i915_gem_request *request, *target = NULL; | 4158 | struct drm_i915_gem_request *request, *target = NULL; |
4182 | unsigned reset_counter; | ||
4183 | int ret; | 4159 | int ret; |
4184 | 4160 | ||
4185 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); | 4161 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); |
4186 | if (ret) | 4162 | if (ret) |
4187 | return ret; | 4163 | return ret; |
4188 | 4164 | ||
4189 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); | 4165 | /* ABI: return -EIO if already wedged */ |
4190 | if (ret) | 4166 | if (i915_terminally_wedged(&dev_priv->gpu_error)) |
4191 | return ret; | 4167 | return -EIO; |
4192 | 4168 | ||
4193 | spin_lock(&file_priv->mm.lock); | 4169 | spin_lock(&file_priv->mm.lock); |
4194 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { | 4170 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { |
@@ -4204,7 +4180,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
4204 | 4180 | ||
4205 | target = request; | 4181 | target = request; |
4206 | } | 4182 | } |
4207 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
4208 | if (target) | 4183 | if (target) |
4209 | i915_gem_request_reference(target); | 4184 | i915_gem_request_reference(target); |
4210 | spin_unlock(&file_priv->mm.lock); | 4185 | spin_unlock(&file_priv->mm.lock); |
@@ -4212,7 +4187,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
4212 | if (target == NULL) | 4187 | if (target == NULL) |
4213 | return 0; | 4188 | return 0; |
4214 | 4189 | ||
4215 | ret = __i915_wait_request(target, reset_counter, true, NULL, NULL); | 4190 | ret = __i915_wait_request(target, true, NULL, NULL); |
4216 | if (ret == 0) | 4191 | if (ret == 0) |
4217 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | 4192 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
4218 | 4193 | ||
@@ -4372,7 +4347,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, | |||
4372 | { | 4347 | { |
4373 | struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); | 4348 | struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); |
4374 | 4349 | ||
4375 | BUG_ON(!vma); | ||
4376 | WARN_ON(vma->pin_count == 0); | 4350 | WARN_ON(vma->pin_count == 0); |
4377 | WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view)); | 4351 | WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view)); |
4378 | 4352 | ||
@@ -4889,7 +4863,7 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4889 | /* Double layer security blanket, see i915_gem_init() */ | 4863 | /* Double layer security blanket, see i915_gem_init() */ |
4890 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 4864 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
4891 | 4865 | ||
4892 | if (dev_priv->ellc_size) | 4866 | if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9) |
4893 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); | 4867 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
4894 | 4868 | ||
4895 | if (IS_HASWELL(dev)) | 4869 | if (IS_HASWELL(dev)) |
@@ -4933,6 +4907,8 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4933 | goto out; | 4907 | goto out; |
4934 | } | 4908 | } |
4935 | 4909 | ||
4910 | intel_mocs_init_l3cc_table(dev); | ||
4911 | |||
4936 | /* We can't enable contexts until all firmware is loaded */ | 4912 | /* We can't enable contexts until all firmware is loaded */ |
4937 | if (HAS_GUC_UCODE(dev)) { | 4913 | if (HAS_GUC_UCODE(dev)) { |
4938 | ret = intel_guc_ucode_load(dev); | 4914 | ret = intel_guc_ucode_load(dev); |
@@ -4958,34 +4934,33 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4958 | req = i915_gem_request_alloc(engine, NULL); | 4934 | req = i915_gem_request_alloc(engine, NULL); |
4959 | if (IS_ERR(req)) { | 4935 | if (IS_ERR(req)) { |
4960 | ret = PTR_ERR(req); | 4936 | ret = PTR_ERR(req); |
4961 | i915_gem_cleanup_engines(dev); | 4937 | break; |
4962 | goto out; | ||
4963 | } | 4938 | } |
4964 | 4939 | ||
4965 | if (engine->id == RCS) { | 4940 | if (engine->id == RCS) { |
4966 | for (j = 0; j < NUM_L3_SLICES(dev); j++) | 4941 | for (j = 0; j < NUM_L3_SLICES(dev); j++) { |
4967 | i915_gem_l3_remap(req, j); | 4942 | ret = i915_gem_l3_remap(req, j); |
4943 | if (ret) | ||
4944 | goto err_request; | ||
4945 | } | ||
4968 | } | 4946 | } |
4969 | 4947 | ||
4970 | ret = i915_ppgtt_init_ring(req); | 4948 | ret = i915_ppgtt_init_ring(req); |
4971 | if (ret && ret != -EIO) { | 4949 | if (ret) |
4972 | DRM_ERROR("PPGTT enable %s failed %d\n", | 4950 | goto err_request; |
4973 | engine->name, ret); | ||
4974 | i915_gem_request_cancel(req); | ||
4975 | i915_gem_cleanup_engines(dev); | ||
4976 | goto out; | ||
4977 | } | ||
4978 | 4951 | ||
4979 | ret = i915_gem_context_enable(req); | 4952 | ret = i915_gem_context_enable(req); |
4980 | if (ret && ret != -EIO) { | 4953 | if (ret) |
4981 | DRM_ERROR("Context enable %s failed %d\n", | 4954 | goto err_request; |
4955 | |||
4956 | err_request: | ||
4957 | i915_add_request_no_flush(req); | ||
4958 | if (ret) { | ||
4959 | DRM_ERROR("Failed to enable %s, error=%d\n", | ||
4982 | engine->name, ret); | 4960 | engine->name, ret); |
4983 | i915_gem_request_cancel(req); | ||
4984 | i915_gem_cleanup_engines(dev); | 4961 | i915_gem_cleanup_engines(dev); |
4985 | goto out; | 4962 | break; |
4986 | } | 4963 | } |
4987 | |||
4988 | i915_add_request_no_flush(req); | ||
4989 | } | 4964 | } |
4990 | 4965 | ||
4991 | out: | 4966 | out: |
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h new file mode 100644 index 000000000000..8292e797d9b5 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem.h | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright © 2016 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef __I915_GEM_H__ | ||
26 | #define __I915_GEM_H__ | ||
27 | |||
28 | #ifdef CONFIG_DRM_I915_DEBUG_GEM | ||
29 | #define GEM_BUG_ON(expr) BUG_ON(expr) | ||
30 | #else | ||
31 | #define GEM_BUG_ON(expr) | ||
32 | #endif | ||
33 | |||
34 | #endif /* __I915_GEM_H__ */ | ||
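Because the non-debug definition above expands to nothing, any expression passed to GEM_BUG_ON() must be side-effect free or the side effect silently disappears in production builds. A small userspace model of that behaviour; the SKETCH_* names are stand-ins for the Kconfig option and macro:

#include <assert.h>

#ifdef SKETCH_DEBUG_GEM			/* models CONFIG_DRM_I915_DEBUG_GEM */
#define SKETCH_GEM_BUG_ON(expr)	assert(!(expr))
#else
#define SKETCH_GEM_BUG_ON(expr)		/* discarded entirely */
#endif

static int pin_count;

int main(void)
{
	/* BAD: without SKETCH_DEBUG_GEM the decrement never happens */
	SKETCH_GEM_BUG_ON(--pin_count < -1);

	/* fine: a pure read, identical behaviour in both builds */
	SKETCH_GEM_BUG_ON(pin_count > 0);
	return 0;
}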
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index fe580cb9501a..e5acc3916f75 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -342,7 +342,7 @@ void i915_gem_context_reset(struct drm_device *dev) | |||
342 | struct intel_context *ctx; | 342 | struct intel_context *ctx; |
343 | 343 | ||
344 | list_for_each_entry(ctx, &dev_priv->context_list, link) | 344 | list_for_each_entry(ctx, &dev_priv->context_list, link) |
345 | intel_lr_context_reset(dev, ctx); | 345 | intel_lr_context_reset(dev_priv, ctx); |
346 | } | 346 | } |
347 | 347 | ||
348 | for (i = 0; i < I915_NUM_ENGINES; i++) { | 348 | for (i = 0; i < I915_NUM_ENGINES; i++) { |
@@ -539,7 +539,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) | |||
539 | 539 | ||
540 | len = 4; | 540 | len = 4; |
541 | if (INTEL_INFO(engine->dev)->gen >= 7) | 541 | if (INTEL_INFO(engine->dev)->gen >= 7) |
542 | len += 2 + (num_rings ? 4*num_rings + 2 : 0); | 542 | len += 2 + (num_rings ? 4*num_rings + 6 : 0); |
543 | 543 | ||
544 | ret = intel_ring_begin(req, len); | 544 | ret = intel_ring_begin(req, len); |
545 | if (ret) | 545 | if (ret) |
@@ -579,6 +579,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) | |||
579 | if (INTEL_INFO(engine->dev)->gen >= 7) { | 579 | if (INTEL_INFO(engine->dev)->gen >= 7) { |
580 | if (num_rings) { | 580 | if (num_rings) { |
581 | struct intel_engine_cs *signaller; | 581 | struct intel_engine_cs *signaller; |
582 | i915_reg_t last_reg = {}; /* keep gcc quiet */ | ||
582 | 583 | ||
583 | intel_ring_emit(engine, | 584 | intel_ring_emit(engine, |
584 | MI_LOAD_REGISTER_IMM(num_rings)); | 585 | MI_LOAD_REGISTER_IMM(num_rings)); |
@@ -586,11 +587,19 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) | |||
586 | if (signaller == engine) | 587 | if (signaller == engine) |
587 | continue; | 588 | continue; |
588 | 589 | ||
589 | intel_ring_emit_reg(engine, | 590 | last_reg = RING_PSMI_CTL(signaller->mmio_base); |
590 | RING_PSMI_CTL(signaller->mmio_base)); | 591 | intel_ring_emit_reg(engine, last_reg); |
591 | intel_ring_emit(engine, | 592 | intel_ring_emit(engine, |
592 | _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); | 593 | _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); |
593 | } | 594 | } |
595 | |||
596 | /* Insert a delay before the next switch! */ | ||
597 | intel_ring_emit(engine, | ||
598 | MI_STORE_REGISTER_MEM | | ||
599 | MI_SRM_LRM_GLOBAL_GTT); | ||
600 | intel_ring_emit_reg(engine, last_reg); | ||
601 | intel_ring_emit(engine, engine->scratch.gtt_offset); | ||
602 | intel_ring_emit(engine, MI_NOOP); | ||
594 | } | 603 | } |
595 | intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE); | 604 | intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE); |
596 | } | 605 | } |
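The +2 to +6 change in the ring_begin() sizing above matches the new four-dword sequence exactly; a quick accounting of the reservation, derived from the emits in this hunk:

/*
 * gen7+ with other rings present, per mi_set_context():
 *
 *   old: len += 2 + (num_rings ? 4*num_rings + 2 : 0)
 *   new: len += 2 + (num_rings ? 4*num_rings + 6 : 0)
 *
 * the extra four dwords are the "delay before the next switch":
 *
 *   MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT    1 dword
 *   last_reg (RING_PSMI_CTL of the last signaller)   1 dword
 *   engine->scratch.gtt_offset                       1 dword
 *   MI_NOOP                                          1 dword
 *                                                    --------
 *                                                    4 dwords
 */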
@@ -600,50 +609,48 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) | |||
600 | return ret; | 609 | return ret; |
601 | } | 610 | } |
602 | 611 | ||
603 | static inline bool should_skip_switch(struct intel_engine_cs *engine, | 612 | static inline bool skip_rcs_switch(struct intel_engine_cs *engine, |
604 | struct intel_context *from, | 613 | struct intel_context *to) |
605 | struct intel_context *to) | ||
606 | { | 614 | { |
607 | if (to->remap_slice) | 615 | if (to->remap_slice) |
608 | return false; | 616 | return false; |
609 | 617 | ||
610 | if (to->ppgtt && from == to && | 618 | if (!to->legacy_hw_ctx.initialized) |
619 | return false; | ||
620 | |||
621 | if (to->ppgtt && | ||
611 | !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) | 622 | !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) |
612 | return true; | 623 | return false; |
613 | 624 | ||
614 | return false; | 625 | return to == engine->last_context; |
615 | } | 626 | } |
616 | 627 | ||
617 | static bool | 628 | static bool |
618 | needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) | 629 | needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) |
619 | { | 630 | { |
620 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | ||
621 | |||
622 | if (!to->ppgtt) | 631 | if (!to->ppgtt) |
623 | return false; | 632 | return false; |
624 | 633 | ||
625 | if (INTEL_INFO(engine->dev)->gen < 8) | 634 | if (engine->last_context == to && |
635 | !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) | ||
636 | return false; | ||
637 | |||
638 | if (engine->id != RCS) | ||
626 | return true; | 639 | return true; |
627 | 640 | ||
628 | if (engine != &dev_priv->engine[RCS]) | 641 | if (INTEL_INFO(engine->dev)->gen < 8) |
629 | return true; | 642 | return true; |
630 | 643 | ||
631 | return false; | 644 | return false; |
632 | } | 645 | } |
633 | 646 | ||
634 | static bool | 647 | static bool |
635 | needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to, | 648 | needs_pd_load_post(struct intel_context *to, u32 hw_flags) |
636 | u32 hw_flags) | ||
637 | { | 649 | { |
638 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | ||
639 | |||
640 | if (!to->ppgtt) | 650 | if (!to->ppgtt) |
641 | return false; | 651 | return false; |
642 | 652 | ||
643 | if (!IS_GEN8(engine->dev)) | 653 | if (!IS_GEN8(to->i915)) |
644 | return false; | ||
645 | |||
646 | if (engine != &dev_priv->engine[RCS]) | ||
647 | return false; | 654 | return false; |
648 | 655 | ||
649 | if (hw_flags & MI_RESTORE_INHIBIT) | 656 | if (hw_flags & MI_RESTORE_INHIBIT) |
@@ -652,60 +659,33 @@ needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to, | |||
652 | return false; | 659 | return false; |
653 | } | 660 | } |
654 | 661 | ||
655 | static int do_switch(struct drm_i915_gem_request *req) | 662 | static int do_rcs_switch(struct drm_i915_gem_request *req) |
656 | { | 663 | { |
657 | struct intel_context *to = req->ctx; | 664 | struct intel_context *to = req->ctx; |
658 | struct intel_engine_cs *engine = req->engine; | 665 | struct intel_engine_cs *engine = req->engine; |
659 | struct drm_i915_private *dev_priv = req->i915; | 666 | struct intel_context *from; |
660 | struct intel_context *from = engine->last_context; | 667 | u32 hw_flags; |
661 | u32 hw_flags = 0; | ||
662 | bool uninitialized = false; | ||
663 | int ret, i; | 668 | int ret, i; |
664 | 669 | ||
665 | if (from != NULL && engine == &dev_priv->engine[RCS]) { | 670 | if (skip_rcs_switch(engine, to)) |
666 | BUG_ON(from->legacy_hw_ctx.rcs_state == NULL); | ||
667 | BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state)); | ||
668 | } | ||
669 | |||
670 | if (should_skip_switch(engine, from, to)) | ||
671 | return 0; | 671 | return 0; |
672 | 672 | ||
673 | /* Trying to pin first makes error handling easier. */ | 673 | /* Trying to pin first makes error handling easier. */ |
674 | if (engine == &dev_priv->engine[RCS]) { | 674 | ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, |
675 | ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, | 675 | get_context_alignment(engine->dev), |
676 | get_context_alignment(engine->dev), | 676 | 0); |
677 | 0); | 677 | if (ret) |
678 | if (ret) | 678 | return ret; |
679 | return ret; | ||
680 | } | ||
681 | 679 | ||
682 | /* | 680 | /* |
683 | * Pin can switch back to the default context if we end up calling into | 681 | * Pin can switch back to the default context if we end up calling into |
684 | * evict_everything - as a last ditch gtt defrag effort that also | 682 | * evict_everything - as a last ditch gtt defrag effort that also |
685 | * switches to the default context. Hence we need to reload from here. | 683 | * switches to the default context. Hence we need to reload from here. |
684 | * | ||
685 | * XXX: Doing so is painfully broken! | ||
686 | */ | 686 | */ |
687 | from = engine->last_context; | 687 | from = engine->last_context; |
688 | 688 | ||
689 | if (needs_pd_load_pre(engine, to)) { | ||
690 | /* Older GENs and non render rings still want the load first, | ||
691 | * "PP_DCLV followed by PP_DIR_BASE register through Load | ||
692 | * Register Immediate commands in Ring Buffer before submitting | ||
693 | * a context."*/ | ||
694 | trace_switch_mm(engine, to); | ||
695 | ret = to->ppgtt->switch_mm(to->ppgtt, req); | ||
696 | if (ret) | ||
697 | goto unpin_out; | ||
698 | |||
699 | /* Doing a PD load always reloads the page dirs */ | ||
700 | to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | ||
701 | } | ||
702 | |||
703 | if (engine != &dev_priv->engine[RCS]) { | ||
704 | if (from) | ||
705 | i915_gem_context_unreference(from); | ||
706 | goto done; | ||
707 | } | ||
708 | |||
709 | /* | 689 | /* |
710 | * Clear this page out of any CPU caches for coherent swap-in/out. Note | 690 | * Clear this page out of any CPU caches for coherent swap-in/out. Note |
711 | * that thanks to write = false in this call and us not setting any gpu | 691 | * that thanks to write = false in this call and us not setting any gpu |
@@ -718,53 +698,37 @@ static int do_switch(struct drm_i915_gem_request *req) | |||
718 | if (ret) | 698 | if (ret) |
719 | goto unpin_out; | 699 | goto unpin_out; |
720 | 700 | ||
721 | if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) { | 701 | if (needs_pd_load_pre(engine, to)) { |
722 | hw_flags |= MI_RESTORE_INHIBIT; | 702 | /* Older GENs and non render rings still want the load first, |
703 | * "PP_DCLV followed by PP_DIR_BASE register through Load | ||
704 | * Register Immediate commands in Ring Buffer before submitting | ||
705 | * a context."*/ | ||
706 | trace_switch_mm(engine, to); | ||
707 | ret = to->ppgtt->switch_mm(to->ppgtt, req); | ||
708 | if (ret) | ||
709 | goto unpin_out; | ||
710 | } | ||
711 | |||
712 | if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) | ||
723 | /* NB: If we inhibit the restore, the context is not allowed to | 713 | /* NB: If we inhibit the restore, the context is not allowed to |
724 | * die because future work may end up depending on valid address | 714 | * die because future work may end up depending on valid address |
725 | * space. This means we must enforce that a page table load | 715 | * space. This means we must enforce that a page table load |
726 | * occur when this occurs. */ | 716 | * occur when this occurs. */ |
727 | } else if (to->ppgtt && | 717 | hw_flags = MI_RESTORE_INHIBIT; |
728 | (intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) { | 718 | else if (to->ppgtt && |
729 | hw_flags |= MI_FORCE_RESTORE; | 719 | intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings) |
730 | to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | 720 | hw_flags = MI_FORCE_RESTORE; |
731 | } | 721 | else |
722 | hw_flags = 0; | ||
732 | 723 | ||
733 | /* We should never emit switch_mm more than once */ | 724 | /* We should never emit switch_mm more than once */ |
734 | WARN_ON(needs_pd_load_pre(engine, to) && | 725 | WARN_ON(needs_pd_load_pre(engine, to) && |
735 | needs_pd_load_post(engine, to, hw_flags)); | 726 | needs_pd_load_post(to, hw_flags)); |
736 | |||
737 | ret = mi_set_context(req, hw_flags); | ||
738 | if (ret) | ||
739 | goto unpin_out; | ||
740 | 727 | ||
741 | /* GEN8 does *not* require an explicit reload if the PDPs have been | 728 | if (to != from || (hw_flags & MI_FORCE_RESTORE)) { |
742 | * setup, and we do not wish to move them. | 729 | ret = mi_set_context(req, hw_flags); |
743 | */ | ||
744 | if (needs_pd_load_post(engine, to, hw_flags)) { | ||
745 | trace_switch_mm(engine, to); | ||
746 | ret = to->ppgtt->switch_mm(to->ppgtt, req); | ||
747 | /* The hardware context switch is emitted, but we haven't | ||
748 | * actually changed the state - so it's probably safe to bail | ||
749 | * here. Still, let the user know something dangerous has | ||
750 | * happened. | ||
751 | */ | ||
752 | if (ret) { | ||
753 | DRM_ERROR("Failed to change address space on context switch\n"); | ||
754 | goto unpin_out; | ||
755 | } | ||
756 | } | ||
757 | |||
758 | for (i = 0; i < MAX_L3_SLICES; i++) { | ||
759 | if (!(to->remap_slice & (1<<i))) | ||
760 | continue; | ||
761 | |||
762 | ret = i915_gem_l3_remap(req, i); | ||
763 | /* If it failed, try again next round */ | ||
764 | if (ret) | 730 | if (ret) |
765 | DRM_DEBUG_DRIVER("L3 remapping failed\n"); | 731 | goto unpin_out; |
766 | else | ||
767 | to->remap_slice &= ~(1<<i); | ||
768 | } | 732 | } |
769 | 733 | ||
770 | /* The backing object for the context is done after switching to the | 734 | /* The backing object for the context is done after switching to the |
@@ -789,27 +753,51 @@ static int do_switch(struct drm_i915_gem_request *req) | |||
789 | i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); | 753 | i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); |
790 | i915_gem_context_unreference(from); | 754 | i915_gem_context_unreference(from); |
791 | } | 755 | } |
792 | |||
793 | uninitialized = !to->legacy_hw_ctx.initialized; | ||
794 | to->legacy_hw_ctx.initialized = true; | ||
795 | |||
796 | done: | ||
797 | i915_gem_context_reference(to); | 756 | i915_gem_context_reference(to); |
798 | engine->last_context = to; | 757 | engine->last_context = to; |
799 | 758 | ||
800 | if (uninitialized) { | 759 | /* GEN8 does *not* require an explicit reload if the PDPs have been |
760 | * setup, and we do not wish to move them. | ||
761 | */ | ||
762 | if (needs_pd_load_post(to, hw_flags)) { | ||
763 | trace_switch_mm(engine, to); | ||
764 | ret = to->ppgtt->switch_mm(to->ppgtt, req); | ||
765 | /* The hardware context switch is emitted, but we haven't | ||
766 | * actually changed the state - so it's probably safe to bail | ||
767 | * here. Still, let the user know something dangerous has | ||
768 | * happened. | ||
769 | */ | ||
770 | if (ret) | ||
771 | return ret; | ||
772 | } | ||
773 | |||
774 | if (to->ppgtt) | ||
775 | to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | ||
776 | |||
777 | for (i = 0; i < MAX_L3_SLICES; i++) { | ||
778 | if (!(to->remap_slice & (1<<i))) | ||
779 | continue; | ||
780 | |||
781 | ret = i915_gem_l3_remap(req, i); | ||
782 | if (ret) | ||
783 | return ret; | ||
784 | |||
785 | to->remap_slice &= ~(1<<i); | ||
786 | } | ||
787 | |||
788 | if (!to->legacy_hw_ctx.initialized) { | ||
801 | if (engine->init_context) { | 789 | if (engine->init_context) { |
802 | ret = engine->init_context(req); | 790 | ret = engine->init_context(req); |
803 | if (ret) | 791 | if (ret) |
804 | DRM_ERROR("ring init context: %d\n", ret); | 792 | return ret; |
805 | } | 793 | } |
794 | to->legacy_hw_ctx.initialized = true; | ||
806 | } | 795 | } |
807 | 796 | ||
808 | return 0; | 797 | return 0; |
809 | 798 | ||
810 | unpin_out: | 799 | unpin_out: |
811 | if (engine->id == RCS) | 800 | i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); |
812 | i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); | ||
813 | return ret; | 801 | return ret; |
814 | } | 802 | } |
815 | 803 | ||
@@ -834,17 +822,33 @@ int i915_switch_context(struct drm_i915_gem_request *req) | |||
834 | WARN_ON(i915.enable_execlists); | 822 | WARN_ON(i915.enable_execlists); |
835 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); | 823 | WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
836 | 824 | ||
837 | if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ | 825 | if (engine->id != RCS || |
838 | if (req->ctx != engine->last_context) { | 826 | req->ctx->legacy_hw_ctx.rcs_state == NULL) { |
839 | i915_gem_context_reference(req->ctx); | 827 | struct intel_context *to = req->ctx; |
828 | |||
829 | if (needs_pd_load_pre(engine, to)) { | ||
830 | int ret; | ||
831 | |||
832 | trace_switch_mm(engine, to); | ||
833 | ret = to->ppgtt->switch_mm(to->ppgtt, req); | ||
834 | if (ret) | ||
835 | return ret; | ||
836 | |||
837 | /* Doing a PD load always reloads the page dirs */ | ||
838 | to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | ||
839 | } | ||
840 | |||
841 | if (to != engine->last_context) { | ||
842 | i915_gem_context_reference(to); | ||
840 | if (engine->last_context) | 843 | if (engine->last_context) |
841 | i915_gem_context_unreference(engine->last_context); | 844 | i915_gem_context_unreference(engine->last_context); |
842 | engine->last_context = req->ctx; | 845 | engine->last_context = to; |
843 | } | 846 | } |
847 | |||
844 | return 0; | 848 | return 0; |
845 | } | 849 | } |
846 | 850 | ||
847 | return do_switch(req); | 851 | return do_rcs_switch(req); |
848 | } | 852 | } |
849 | 853 | ||
850 | static bool contexts_enabled(struct drm_device *dev) | 854 | static bool contexts_enabled(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 6ee4f00f620c..6f4f2a6cdf93 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -1137,7 +1137,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, | |||
1137 | } | 1137 | } |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | void | 1140 | static void |
1141 | i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) | 1141 | i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) |
1142 | { | 1142 | { |
1143 | /* Unconditionally force add_request to emit a full flush. */ | 1143 | /* Unconditionally force add_request to emit a full flush. */ |
@@ -1322,7 +1322,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, | |||
1322 | trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); | 1322 | trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); |
1323 | 1323 | ||
1324 | i915_gem_execbuffer_move_to_active(vmas, params->request); | 1324 | i915_gem_execbuffer_move_to_active(vmas, params->request); |
1325 | i915_gem_execbuffer_retire_commands(params); | ||
1326 | 1325 | ||
1327 | return 0; | 1326 | return 0; |
1328 | } | 1327 | } |
@@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1624 | 1623 | ||
1625 | ret = i915_gem_request_add_to_client(req, file); | 1624 | ret = i915_gem_request_add_to_client(req, file); |
1626 | if (ret) | 1625 | if (ret) |
1627 | goto err_batch_unpin; | 1626 | goto err_request; |
1628 | 1627 | ||
1629 | /* | 1628 | /* |
1630 | * Save assorted stuff away to pass through to *_submission(). | 1629 | * Save assorted stuff away to pass through to *_submission(). |
@@ -1641,6 +1640,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1641 | params->request = req; | 1640 | params->request = req; |
1642 | 1641 | ||
1643 | ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); | 1642 | ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); |
1643 | err_request: | ||
1644 | i915_gem_execbuffer_retire_commands(params); | ||
1644 | 1645 | ||
1645 | err_batch_unpin: | 1646 | err_batch_unpin: |
1646 | /* | 1647 | /* |
@@ -1657,14 +1658,6 @@ err: | |||
1657 | i915_gem_context_unreference(ctx); | 1658 | i915_gem_context_unreference(ctx); |
1658 | eb_destroy(eb); | 1659 | eb_destroy(eb); |
1659 | 1660 | ||
1660 | /* | ||
1661 | * If the request was created but not successfully submitted then it | ||
1662 | * must be freed again. If it was submitted then it is being tracked | ||
1663 | * on the active request list and no clean up is required here. | ||
1664 | */ | ||
1665 | if (ret && !IS_ERR_OR_NULL(req)) | ||
1666 | i915_gem_request_cancel(req); | ||
1667 | |||
1668 | mutex_unlock(&dev->struct_mutex); | 1661 | mutex_unlock(&dev->struct_mutex); |
1669 | 1662 | ||
1670 | pre_mutex_err: | 1663 | pre_mutex_err: |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c5cb04907525..0d666b3f7e9b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -745,7 +745,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm, | |||
745 | num_entries--; | 745 | num_entries--; |
746 | } | 746 | } |
747 | 747 | ||
748 | kunmap_px(ppgtt, pt); | 748 | kunmap_px(ppgtt, pt_vaddr); |
749 | 749 | ||
750 | pte = 0; | 750 | pte = 0; |
751 | if (++pde == I915_PDES) { | 751 | if (++pde == I915_PDES) { |
@@ -905,11 +905,10 @@ static int gen8_init_scratch(struct i915_address_space *vm) | |||
905 | static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) | 905 | static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) |
906 | { | 906 | { |
907 | enum vgt_g2v_type msg; | 907 | enum vgt_g2v_type msg; |
908 | struct drm_device *dev = ppgtt->base.dev; | 908 | struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); |
909 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
910 | int i; | 909 | int i; |
911 | 910 | ||
912 | if (USES_FULL_48BIT_PPGTT(dev)) { | 911 | if (USES_FULL_48BIT_PPGTT(dev_priv)) { |
913 | u64 daddr = px_dma(&ppgtt->pml4); | 912 | u64 daddr = px_dma(&ppgtt->pml4); |
914 | 913 | ||
915 | I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); | 914 | I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); |
@@ -3172,7 +3171,8 @@ int i915_ggtt_init_hw(struct drm_device *dev) | |||
3172 | } else if (INTEL_INFO(dev)->gen < 8) { | 3171 | } else if (INTEL_INFO(dev)->gen < 8) { |
3173 | ggtt->probe = gen6_gmch_probe; | 3172 | ggtt->probe = gen6_gmch_probe; |
3174 | ggtt->base.cleanup = gen6_gmch_remove; | 3173 | ggtt->base.cleanup = gen6_gmch_remove; |
3175 | if (IS_HASWELL(dev) && dev_priv->ellc_size) | 3174 | |
3175 | if (HAS_EDRAM(dev)) | ||
3176 | ggtt->base.pte_encode = iris_pte_encode; | 3176 | ggtt->base.pte_encode = iris_pte_encode; |
3177 | else if (IS_HASWELL(dev)) | 3177 | else if (IS_HASWELL(dev)) |
3178 | ggtt->base.pte_encode = hsw_pte_encode; | 3178 | ggtt->base.pte_encode = hsw_pte_encode; |
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index d46388f25e04..425e721aac58 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c | |||
@@ -70,6 +70,10 @@ static bool swap_available(void) | |||
70 | 70 | ||
71 | static bool can_release_pages(struct drm_i915_gem_object *obj) | 71 | static bool can_release_pages(struct drm_i915_gem_object *obj) |
72 | { | 72 | { |
73 | /* Only shmemfs objects are backed by swap */ | ||
74 | if (!obj->base.filp) | ||
75 | return false; | ||
76 | |||
73 | /* Only report true if by unbinding the object and putting its pages | 77 | /* Only report true if by unbinding the object and putting its pages |
74 | * we can actually make forward progress towards freeing physical | 78 | * we can actually make forward progress towards freeing physical |
75 | * pages. | 79 | * pages. |
@@ -336,7 +340,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) | |||
336 | container_of(nb, struct drm_i915_private, mm.oom_notifier); | 340 | container_of(nb, struct drm_i915_private, mm.oom_notifier); |
337 | struct shrinker_lock_uninterruptible slu; | 341 | struct shrinker_lock_uninterruptible slu; |
338 | struct drm_i915_gem_object *obj; | 342 | struct drm_i915_gem_object *obj; |
339 | unsigned long pinned, bound, unbound, freed_pages; | 343 | unsigned long unevictable, bound, unbound, freed_pages; |
340 | 344 | ||
341 | if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) | 345 | if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) |
342 | return NOTIFY_DONE; | 346 | return NOTIFY_DONE; |
@@ -347,33 +351,28 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) | |||
347 | * assert that there are no objects with pinned pages that are not | 351 | * assert that there are no objects with pinned pages that are not |
348 | * being pointed to by hardware. | 352 | * being pointed to by hardware. |
349 | */ | 353 | */ |
350 | unbound = bound = pinned = 0; | 354 | unbound = bound = unevictable = 0; |
351 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { | 355 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { |
352 | if (!obj->base.filp) /* not backed by a freeable object */ | 356 | if (!can_release_pages(obj)) |
353 | continue; | 357 | unevictable += obj->base.size >> PAGE_SHIFT; |
354 | |||
355 | if (obj->pages_pin_count) | ||
356 | pinned += obj->base.size; | ||
357 | else | 358 | else |
358 | unbound += obj->base.size; | 359 | unbound += obj->base.size >> PAGE_SHIFT; |
359 | } | 360 | } |
360 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { | 361 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
361 | if (!obj->base.filp) | 362 | if (!can_release_pages(obj)) |
362 | continue; | 363 | unevictable += obj->base.size >> PAGE_SHIFT; |
363 | |||
364 | if (obj->pages_pin_count) | ||
365 | pinned += obj->base.size; | ||
366 | else | 364 | else |
367 | bound += obj->base.size; | 365 | bound += obj->base.size >> PAGE_SHIFT; |
368 | } | 366 | } |
369 | 367 | ||
370 | i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); | 368 | i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); |
371 | 369 | ||
372 | if (freed_pages || unbound || bound) | 370 | if (freed_pages || unbound || bound) |
373 | pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", | 371 | pr_info("Purging GPU memory, %lu pages freed, " |
374 | freed_pages << PAGE_SHIFT, pinned); | 372 | "%lu pages still pinned.\n", |
373 | freed_pages, unevictable); | ||
375 | if (unbound || bound) | 374 | if (unbound || bound) |
376 | pr_err("%lu and %lu bytes still available in the " | 375 | pr_err("%lu and %lu pages still available in the " |
377 | "bound and unbound GPU page lists.\n", | 376 | "bound and unbound GPU page lists.\n", |
378 | bound, unbound); | 377 | bound, unbound); |
379 | 378 | ||
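The shrinker OOM report above now counts pages rather than bytes, so each object's size is shifted down by PAGE_SHIFT before being summed. A quick sanity check of the conversion, assuming the common 4 KiB page size (PAGE_SHIFT == 12; it differs on some architectures):

/*
 *   obj->base.size = 1 MiB  ->  1048576 >> 12 = 256 pages
 *   obj->base.size = 64 KiB ->    65536 >> 12 =  16 pages
 *
 * so the log line reads "... 256 pages still pinned" where it used to
 * print the raw byte count.
 */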
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index ea06da012d32..b7ce963fb8f8 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -95,9 +95,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) | |||
95 | u32 base; | 95 | u32 base; |
96 | 96 | ||
97 | /* Almost universally we can find the Graphics Base of Stolen Memory | 97 | /* Almost universally we can find the Graphics Base of Stolen Memory |
98 | * at offset 0x5c in the igfx configuration space. On a few (desktop) | 98 | * at register BSM (0x5c) in the igfx configuration space. On a few |
99 | * machines this is also mirrored in the bridge device at different | 99 | * (desktop) machines this is also mirrored in the bridge device at |
100 | * locations, or in the MCHBAR. | 100 | * different locations, or in the MCHBAR. |
101 | * | 101 | * |
102 | * On 865 we just check the TOUD register. | 102 | * On 865 we just check the TOUD register. |
103 | * | 103 | * |
@@ -107,9 +107,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) | |||
107 | */ | 107 | */ |
108 | base = 0; | 108 | base = 0; |
109 | if (INTEL_INFO(dev)->gen >= 3) { | 109 | if (INTEL_INFO(dev)->gen >= 3) { |
110 | /* Read Graphics Base of Stolen Memory directly */ | 110 | u32 bsm; |
111 | pci_read_config_dword(dev->pdev, 0x5c, &base); | 111 | |
112 | base &= ~((1<<20) - 1); | 112 | pci_read_config_dword(dev->pdev, BSM, &bsm); |
113 | |||
114 | base = bsm & BSM_MASK; | ||
113 | } else if (IS_I865G(dev)) { | 115 | } else if (IS_I865G(dev)) { |
114 | u16 toud = 0; | 116 | u16 toud = 0; |
115 | 117 | ||
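The stolen-memory hunk replaces the magic 0x5c config offset and open-coded mask with the BSM/BSM_MASK definitions. A sketch of the underlying pattern for gen3+, reading the Base of Stolen Memory from PCI config space and masking off the low bits (BSM is assumed to be the 0x5c offset the old code hard-coded, and the mask shown matches the old ~((1<<20) - 1), i.e. 1 MiB alignment):

	u32 bsm, base;

	/* Graphics Base of Stolen Memory sits at offset 0x5c (BSM) in the
	 * igfx PCI configuration space on gen3+. */
	pci_read_config_dword(dev->pdev, BSM, &bsm);

	/* Only the upper bits carry the address; the base is 1 MiB aligned. */
	base = bsm & ~((1 << 20) - 1);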
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 0f94b6c5c9cc..32d9726e38b1 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | struct i915_mm_struct { | 35 | struct i915_mm_struct { |
36 | struct mm_struct *mm; | 36 | struct mm_struct *mm; |
37 | struct drm_device *dev; | 37 | struct drm_i915_private *i915; |
38 | struct i915_mmu_notifier *mn; | 38 | struct i915_mmu_notifier *mn; |
39 | struct hlist_node node; | 39 | struct hlist_node node; |
40 | struct kref kref; | 40 | struct kref kref; |
@@ -49,6 +49,7 @@ struct i915_mmu_notifier { | |||
49 | struct hlist_node node; | 49 | struct hlist_node node; |
50 | struct mmu_notifier mn; | 50 | struct mmu_notifier mn; |
51 | struct rb_root objects; | 51 | struct rb_root objects; |
52 | struct workqueue_struct *wq; | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | struct i915_mmu_object { | 55 | struct i915_mmu_object { |
@@ -60,6 +61,37 @@ struct i915_mmu_object { | |||
60 | bool attached; | 61 | bool attached; |
61 | }; | 62 | }; |
62 | 63 | ||
64 | static void wait_rendering(struct drm_i915_gem_object *obj) | ||
65 | { | ||
66 | struct drm_device *dev = obj->base.dev; | ||
67 | struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; | ||
68 | int i, n; | ||
69 | |||
70 | if (!obj->active) | ||
71 | return; | ||
72 | |||
73 | n = 0; | ||
74 | for (i = 0; i < I915_NUM_ENGINES; i++) { | ||
75 | struct drm_i915_gem_request *req; | ||
76 | |||
77 | req = obj->last_read_req[i]; | ||
78 | if (req == NULL) | ||
79 | continue; | ||
80 | |||
81 | requests[n++] = i915_gem_request_reference(req); | ||
82 | } | ||
83 | |||
84 | mutex_unlock(&dev->struct_mutex); | ||
85 | |||
86 | for (i = 0; i < n; i++) | ||
87 | __i915_wait_request(requests[i], false, NULL, NULL); | ||
88 | |||
89 | mutex_lock(&dev->struct_mutex); | ||
90 | |||
91 | for (i = 0; i < n; i++) | ||
92 | i915_gem_request_unreference(requests[i]); | ||
93 | } | ||
94 | |||
63 | static void cancel_userptr(struct work_struct *work) | 95 | static void cancel_userptr(struct work_struct *work) |
64 | { | 96 | { |
65 | struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); | 97 | struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); |
@@ -75,13 +107,13 @@ static void cancel_userptr(struct work_struct *work) | |||
75 | struct i915_vma *vma, *tmp; | 107 | struct i915_vma *vma, *tmp; |
76 | bool was_interruptible; | 108 | bool was_interruptible; |
77 | 109 | ||
110 | wait_rendering(obj); | ||
111 | |||
78 | was_interruptible = dev_priv->mm.interruptible; | 112 | was_interruptible = dev_priv->mm.interruptible; |
79 | dev_priv->mm.interruptible = false; | 113 | dev_priv->mm.interruptible = false; |
80 | 114 | ||
81 | list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) { | 115 | list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) |
82 | int ret = i915_vma_unbind(vma); | 116 | WARN_ON(i915_vma_unbind(vma)); |
83 | WARN_ON(ret && ret != -EIO); | ||
84 | } | ||
85 | WARN_ON(i915_gem_object_put_pages(obj)); | 117 | WARN_ON(i915_gem_object_put_pages(obj)); |
86 | 118 | ||
87 | dev_priv->mm.interruptible = was_interruptible; | 119 | dev_priv->mm.interruptible = was_interruptible; |
@@ -140,7 +172,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, | |||
140 | */ | 172 | */ |
141 | mo = container_of(it, struct i915_mmu_object, it); | 173 | mo = container_of(it, struct i915_mmu_object, it); |
142 | if (kref_get_unless_zero(&mo->obj->base.refcount)) | 174 | if (kref_get_unless_zero(&mo->obj->base.refcount)) |
143 | schedule_work(&mo->work); | 175 | queue_work(mn->wq, &mo->work); |
144 | 176 | ||
145 | list_add(&mo->link, &cancelled); | 177 | list_add(&mo->link, &cancelled); |
146 | it = interval_tree_iter_next(it, start, end); | 178 | it = interval_tree_iter_next(it, start, end); |
@@ -148,6 +180,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, | |||
148 | list_for_each_entry(mo, &cancelled, link) | 180 | list_for_each_entry(mo, &cancelled, link) |
149 | del_object(mo); | 181 | del_object(mo); |
150 | spin_unlock(&mn->lock); | 182 | spin_unlock(&mn->lock); |
183 | |||
184 | flush_workqueue(mn->wq); | ||
151 | } | 185 | } |
152 | 186 | ||
153 | static const struct mmu_notifier_ops i915_gem_userptr_notifier = { | 187 | static const struct mmu_notifier_ops i915_gem_userptr_notifier = { |
@@ -167,10 +201,16 @@ i915_mmu_notifier_create(struct mm_struct *mm) | |||
167 | spin_lock_init(&mn->lock); | 201 | spin_lock_init(&mn->lock); |
168 | mn->mn.ops = &i915_gem_userptr_notifier; | 202 | mn->mn.ops = &i915_gem_userptr_notifier; |
169 | mn->objects = RB_ROOT; | 203 | mn->objects = RB_ROOT; |
204 | mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0); | ||
205 | if (mn->wq == NULL) { | ||
206 | kfree(mn); | ||
207 | return ERR_PTR(-ENOMEM); | ||
208 | } | ||
170 | 209 | ||
171 | /* Protected by mmap_sem (write-lock) */ | 210 | /* Protected by mmap_sem (write-lock) */ |
172 | ret = __mmu_notifier_register(&mn->mn, mm); | 211 | ret = __mmu_notifier_register(&mn->mn, mm); |
173 | if (ret) { | 212 | if (ret) { |
213 | destroy_workqueue(mn->wq); | ||
174 | kfree(mn); | 214 | kfree(mn); |
175 | return ERR_PTR(ret); | 215 | return ERR_PTR(ret); |
176 | } | 216 | } |
@@ -205,13 +245,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) | |||
205 | return mn; | 245 | return mn; |
206 | 246 | ||
207 | down_write(&mm->mm->mmap_sem); | 247 | down_write(&mm->mm->mmap_sem); |
208 | mutex_lock(&to_i915(mm->dev)->mm_lock); | 248 | mutex_lock(&mm->i915->mm_lock); |
209 | if ((mn = mm->mn) == NULL) { | 249 | if ((mn = mm->mn) == NULL) { |
210 | mn = i915_mmu_notifier_create(mm->mm); | 250 | mn = i915_mmu_notifier_create(mm->mm); |
211 | if (!IS_ERR(mn)) | 251 | if (!IS_ERR(mn)) |
212 | mm->mn = mn; | 252 | mm->mn = mn; |
213 | } | 253 | } |
214 | mutex_unlock(&to_i915(mm->dev)->mm_lock); | 254 | mutex_unlock(&mm->i915->mm_lock); |
215 | up_write(&mm->mm->mmap_sem); | 255 | up_write(&mm->mm->mmap_sem); |
216 | 256 | ||
217 | return mn; | 257 | return mn; |
@@ -256,6 +296,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn, | |||
256 | return; | 296 | return; |
257 | 297 | ||
258 | mmu_notifier_unregister(&mn->mn, mm); | 298 | mmu_notifier_unregister(&mn->mn, mm); |
299 | destroy_workqueue(mn->wq); | ||
259 | kfree(mn); | 300 | kfree(mn); |
260 | } | 301 | } |
261 | 302 | ||
@@ -327,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) | |||
327 | } | 368 | } |
328 | 369 | ||
329 | kref_init(&mm->kref); | 370 | kref_init(&mm->kref); |
330 | mm->dev = obj->base.dev; | 371 | mm->i915 = to_i915(obj->base.dev); |
331 | 372 | ||
332 | mm->mm = current->mm; | 373 | mm->mm = current->mm; |
333 | atomic_inc(¤t->mm->mm_count); | 374 | atomic_inc(¤t->mm->mm_count); |
@@ -362,7 +403,7 @@ __i915_mm_struct_free(struct kref *kref) | |||
362 | 403 | ||
363 | /* Protected by dev_priv->mm_lock */ | 404 | /* Protected by dev_priv->mm_lock */ |
364 | hash_del(&mm->node); | 405 | hash_del(&mm->node); |
365 | mutex_unlock(&to_i915(mm->dev)->mm_lock); | 406 | mutex_unlock(&mm->i915->mm_lock); |
366 | 407 | ||
367 | INIT_WORK(&mm->work, __i915_mm_struct_free__worker); | 408 | INIT_WORK(&mm->work, __i915_mm_struct_free__worker); |
368 | schedule_work(&mm->work); | 409 | schedule_work(&mm->work); |
@@ -498,19 +539,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) | |||
498 | if (pvec != NULL) { | 539 | if (pvec != NULL) { |
499 | struct mm_struct *mm = obj->userptr.mm->mm; | 540 | struct mm_struct *mm = obj->userptr.mm->mm; |
500 | 541 | ||
501 | down_read(&mm->mmap_sem); | 542 | ret = -EFAULT; |
502 | while (pinned < npages) { | 543 | if (atomic_inc_not_zero(&mm->mm_users)) { |
503 | ret = get_user_pages_remote(work->task, mm, | 544 | down_read(&mm->mmap_sem); |
504 | obj->userptr.ptr + pinned * PAGE_SIZE, | 545 | while (pinned < npages) { |
505 | npages - pinned, | 546 | ret = get_user_pages_remote |
506 | !obj->userptr.read_only, 0, | 547 | (work->task, mm, |
507 | pvec + pinned, NULL); | 548 | obj->userptr.ptr + pinned * PAGE_SIZE, |
508 | if (ret < 0) | 549 | npages - pinned, |
509 | break; | 550 | !obj->userptr.read_only, 0, |
510 | 551 | pvec + pinned, NULL); | |
511 | pinned += ret; | 552 | if (ret < 0) |
553 | break; | ||
554 | |||
555 | pinned += ret; | ||
556 | } | ||
557 | up_read(&mm->mmap_sem); | ||
558 | mmput(mm); | ||
512 | } | 559 | } |
513 | up_read(&mm->mmap_sem); | ||
514 | } | 560 | } |
515 | 561 | ||
516 | mutex_lock(&dev->struct_mutex); | 562 | mutex_lock(&dev->struct_mutex); |
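One userptr corner case fixed above: the get-pages worker must not walk an address space that is already being torn down, so it now takes a reference on mm_users before touching mmap_sem and drops it with mmput() afterwards. A condensed sketch of that guard, using the same get_user_pages_remote() arguments as the hunk (error handling trimmed):

	ret = -EFAULT;
	if (atomic_inc_not_zero(&mm->mm_users)) {	/* skip if the mm is exiting */
		down_read(&mm->mmap_sem);
		while (pinned < npages) {
			ret = get_user_pages_remote(work->task, mm,
						    obj->userptr.ptr + pinned * PAGE_SIZE,
						    npages - pinned,
						    !obj->userptr.read_only, 0,
						    pvec + pinned, NULL);
			if (ret < 0)
				break;
			pinned += ret;
		}
		up_read(&mm->mmap_sem);
		mmput(mm);				/* pairs with atomic_inc_not_zero() */
	}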
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index da86bdbba275..d40c13fb6643 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c | |||
@@ -179,15 +179,11 @@ static void guc_init_doorbell(struct intel_guc *guc, | |||
179 | struct i915_guc_client *client) | 179 | struct i915_guc_client *client) |
180 | { | 180 | { |
181 | struct guc_doorbell_info *doorbell; | 181 | struct guc_doorbell_info *doorbell; |
182 | void *base; | ||
183 | 182 | ||
184 | base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); | 183 | doorbell = client->client_base + client->doorbell_offset; |
185 | doorbell = base + client->doorbell_offset; | ||
186 | 184 | ||
187 | doorbell->db_status = 1; | 185 | doorbell->db_status = GUC_DOORBELL_ENABLED; |
188 | doorbell->cookie = 0; | 186 | doorbell->cookie = 0; |
189 | |||
190 | kunmap_atomic(base); | ||
191 | } | 187 | } |
192 | 188 | ||
193 | static int guc_ring_doorbell(struct i915_guc_client *gc) | 189 | static int guc_ring_doorbell(struct i915_guc_client *gc) |
@@ -195,11 +191,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) | |||
195 | struct guc_process_desc *desc; | 191 | struct guc_process_desc *desc; |
196 | union guc_doorbell_qw db_cmp, db_exc, db_ret; | 192 | union guc_doorbell_qw db_cmp, db_exc, db_ret; |
197 | union guc_doorbell_qw *db; | 193 | union guc_doorbell_qw *db; |
198 | void *base; | ||
199 | int attempt = 2, ret = -EAGAIN; | 194 | int attempt = 2, ret = -EAGAIN; |
200 | 195 | ||
201 | base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); | 196 | desc = gc->client_base + gc->proc_desc_offset; |
202 | desc = base + gc->proc_desc_offset; | ||
203 | 197 | ||
204 | /* Update the tail so it is visible to GuC */ | 198 | /* Update the tail so it is visible to GuC */ |
205 | desc->tail = gc->wq_tail; | 199 | desc->tail = gc->wq_tail; |
@@ -215,7 +209,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) | |||
215 | db_exc.cookie = 1; | 209 | db_exc.cookie = 1; |
216 | 210 | ||
217 | /* pointer of current doorbell cacheline */ | 211 | /* pointer of current doorbell cacheline */ |
218 | db = base + gc->doorbell_offset; | 212 | db = gc->client_base + gc->doorbell_offset; |
219 | 213 | ||
220 | while (attempt--) { | 214 | while (attempt--) { |
221 | /* lets ring the doorbell */ | 215 | /* lets ring the doorbell */ |
@@ -244,10 +238,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) | |||
244 | db_exc.cookie = 1; | 238 | db_exc.cookie = 1; |
245 | } | 239 | } |
246 | 240 | ||
247 | /* Finally, update the cached copy of the GuC's WQ head */ | ||
248 | gc->wq_head = desc->head; | ||
249 | |||
250 | kunmap_atomic(base); | ||
251 | return ret; | 241 | return ret; |
252 | } | 242 | } |
253 | 243 | ||
@@ -256,16 +246,12 @@ static void guc_disable_doorbell(struct intel_guc *guc, | |||
256 | { | 246 | { |
257 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 247 | struct drm_i915_private *dev_priv = guc_to_i915(guc); |
258 | struct guc_doorbell_info *doorbell; | 248 | struct guc_doorbell_info *doorbell; |
259 | void *base; | ||
260 | i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id); | 249 | i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id); |
261 | int value; | 250 | int value; |
262 | 251 | ||
263 | base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); | 252 | doorbell = client->client_base + client->doorbell_offset; |
264 | doorbell = base + client->doorbell_offset; | ||
265 | |||
266 | doorbell->db_status = 0; | ||
267 | 253 | ||
268 | kunmap_atomic(base); | 254 | doorbell->db_status = GUC_DOORBELL_DISABLED; |
269 | 255 | ||
270 | I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID); | 256 | I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID); |
271 | 257 | ||
@@ -341,10 +327,8 @@ static void guc_init_proc_desc(struct intel_guc *guc, | |||
341 | struct i915_guc_client *client) | 327 | struct i915_guc_client *client) |
342 | { | 328 | { |
343 | struct guc_process_desc *desc; | 329 | struct guc_process_desc *desc; |
344 | void *base; | ||
345 | 330 | ||
346 | base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); | 331 | desc = client->client_base + client->proc_desc_offset; |
347 | desc = base + client->proc_desc_offset; | ||
348 | 332 | ||
349 | memset(desc, 0, sizeof(*desc)); | 333 | memset(desc, 0, sizeof(*desc)); |
350 | 334 | ||
@@ -361,8 +345,6 @@ static void guc_init_proc_desc(struct intel_guc *guc, | |||
361 | desc->wq_size_bytes = client->wq_size; | 345 | desc->wq_size_bytes = client->wq_size; |
362 | desc->wq_status = WQ_STATUS_ACTIVE; | 346 | desc->wq_status = WQ_STATUS_ACTIVE; |
363 | desc->priority = client->priority; | 347 | desc->priority = client->priority; |
364 | |||
365 | kunmap_atomic(base); | ||
366 | } | 348 | } |
367 | 349 | ||
368 | /* | 350 | /* |
@@ -376,12 +358,14 @@ static void guc_init_proc_desc(struct intel_guc *guc, | |||
376 | static void guc_init_ctx_desc(struct intel_guc *guc, | 358 | static void guc_init_ctx_desc(struct intel_guc *guc, |
377 | struct i915_guc_client *client) | 359 | struct i915_guc_client *client) |
378 | { | 360 | { |
361 | struct drm_i915_gem_object *client_obj = client->client_obj; | ||
379 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 362 | struct drm_i915_private *dev_priv = guc_to_i915(guc); |
380 | struct intel_engine_cs *engine; | 363 | struct intel_engine_cs *engine; |
381 | struct intel_context *ctx = client->owner; | 364 | struct intel_context *ctx = client->owner; |
382 | struct guc_context_desc desc; | 365 | struct guc_context_desc desc; |
383 | struct sg_table *sg; | 366 | struct sg_table *sg; |
384 | enum intel_engine_id id; | 367 | enum intel_engine_id id; |
368 | u32 gfx_addr; | ||
385 | 369 | ||
386 | memset(&desc, 0, sizeof(desc)); | 370 | memset(&desc, 0, sizeof(desc)); |
387 | 371 | ||
@@ -410,16 +394,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc, | |||
410 | lrc->context_desc = (u32)ctx_desc; | 394 | lrc->context_desc = (u32)ctx_desc; |
411 | 395 | ||
412 | /* The state page is after PPHWSP */ | 396 | /* The state page is after PPHWSP */ |
413 | lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) + | 397 | gfx_addr = i915_gem_obj_ggtt_offset(obj); |
414 | LRC_STATE_PN * PAGE_SIZE; | 398 | lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; |
415 | lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | | 399 | lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | |
416 | (engine->guc_id << GUC_ELC_ENGINE_OFFSET); | 400 | (engine->guc_id << GUC_ELC_ENGINE_OFFSET); |
417 | 401 | ||
418 | obj = ctx->engine[id].ringbuf->obj; | 402 | obj = ctx->engine[id].ringbuf->obj; |
403 | gfx_addr = i915_gem_obj_ggtt_offset(obj); | ||
419 | 404 | ||
420 | lrc->ring_begin = i915_gem_obj_ggtt_offset(obj); | 405 | lrc->ring_begin = gfx_addr; |
421 | lrc->ring_end = lrc->ring_begin + obj->base.size - 1; | 406 | lrc->ring_end = gfx_addr + obj->base.size - 1; |
422 | lrc->ring_next_free_location = lrc->ring_begin; | 407 | lrc->ring_next_free_location = gfx_addr; |
423 | lrc->ring_current_tail_pointer_value = 0; | 408 | lrc->ring_current_tail_pointer_value = 0; |
424 | 409 | ||
425 | desc.engines_used |= (1 << engine->guc_id); | 410 | desc.engines_used |= (1 << engine->guc_id); |
@@ -428,22 +413,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc, | |||
428 | WARN_ON(desc.engines_used == 0); | 413 | WARN_ON(desc.engines_used == 0); |
429 | 414 | ||
430 | /* | 415 | /* |
431 | * The CPU address is only needed at certain points, so kmap_atomic on | 416 | * The doorbell, process descriptor, and workqueue are all parts |
432 | * demand instead of storing it in the ctx descriptor. | 417 | * of the client object, which the GuC will reference via the GGTT |
433 | * XXX: May make debug easier to have it mapped | ||
434 | */ | 418 | */ |
435 | desc.db_trigger_cpu = 0; | 419 | gfx_addr = i915_gem_obj_ggtt_offset(client_obj); |
436 | desc.db_trigger_uk = client->doorbell_offset + | 420 | desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) + |
437 | i915_gem_obj_ggtt_offset(client->client_obj); | 421 | client->doorbell_offset; |
438 | desc.db_trigger_phy = client->doorbell_offset + | 422 | desc.db_trigger_cpu = (uintptr_t)client->client_base + |
439 | sg_dma_address(client->client_obj->pages->sgl); | 423 | client->doorbell_offset; |
440 | 424 | desc.db_trigger_uk = gfx_addr + client->doorbell_offset; | |
441 | desc.process_desc = client->proc_desc_offset + | 425 | desc.process_desc = gfx_addr + client->proc_desc_offset; |
442 | i915_gem_obj_ggtt_offset(client->client_obj); | 426 | desc.wq_addr = gfx_addr + client->wq_offset; |
443 | |||
444 | desc.wq_addr = client->wq_offset + | ||
445 | i915_gem_obj_ggtt_offset(client->client_obj); | ||
446 | |||
447 | desc.wq_size = client->wq_size; | 427 | desc.wq_size = client->wq_size; |
448 | 428 | ||
449 | /* | 429 | /* |
@@ -474,25 +454,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc, | |||
474 | int i915_guc_wq_check_space(struct i915_guc_client *gc) | 454 | int i915_guc_wq_check_space(struct i915_guc_client *gc) |
475 | { | 455 | { |
476 | struct guc_process_desc *desc; | 456 | struct guc_process_desc *desc; |
477 | void *base; | ||
478 | u32 size = sizeof(struct guc_wq_item); | 457 | u32 size = sizeof(struct guc_wq_item); |
479 | int ret = -ETIMEDOUT, timeout_counter = 200; | 458 | int ret = -ETIMEDOUT, timeout_counter = 200; |
480 | 459 | ||
481 | if (!gc) | 460 | if (!gc) |
482 | return 0; | 461 | return 0; |
483 | 462 | ||
484 | /* Quickly return if wq space is available since last time we cache the | 463 | desc = gc->client_base + gc->proc_desc_offset; |
485 | * head position. */ | ||
486 | if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) | ||
487 | return 0; | ||
488 | |||
489 | base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); | ||
490 | desc = base + gc->proc_desc_offset; | ||
491 | 464 | ||
492 | while (timeout_counter-- > 0) { | 465 | while (timeout_counter-- > 0) { |
493 | gc->wq_head = desc->head; | 466 | if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { |
494 | |||
495 | if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) { | ||
496 | ret = 0; | 467 | ret = 0; |
497 | break; | 468 | break; |
498 | } | 469 | } |
@@ -501,19 +472,19 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc) | |||
501 | usleep_range(1000, 2000); | 472 | usleep_range(1000, 2000); |
502 | }; | 473 | }; |
503 | 474 | ||
504 | kunmap_atomic(base); | ||
505 | |||
506 | return ret; | 475 | return ret; |
507 | } | 476 | } |
508 | 477 | ||
509 | static int guc_add_workqueue_item(struct i915_guc_client *gc, | 478 | static int guc_add_workqueue_item(struct i915_guc_client *gc, |
510 | struct drm_i915_gem_request *rq) | 479 | struct drm_i915_gem_request *rq) |
511 | { | 480 | { |
481 | struct guc_process_desc *desc; | ||
512 | struct guc_wq_item *wqi; | 482 | struct guc_wq_item *wqi; |
513 | void *base; | 483 | void *base; |
514 | u32 tail, wq_len, wq_off, space; | 484 | u32 tail, wq_len, wq_off, space; |
515 | 485 | ||
516 | space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size); | 486 | desc = gc->client_base + gc->proc_desc_offset; |
487 | space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); | ||
517 | if (WARN_ON(space < sizeof(struct guc_wq_item))) | 488 | if (WARN_ON(space < sizeof(struct guc_wq_item))) |
518 | return -ENOSPC; /* shouldn't happen */ | 489 | return -ENOSPC; /* shouldn't happen */ |
519 | 490 | ||
@@ -661,21 +632,28 @@ static void guc_client_free(struct drm_device *dev, | |||
661 | if (!client) | 632 | if (!client) |
662 | return; | 633 | return; |
663 | 634 | ||
664 | if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) { | ||
665 | /* | ||
666 | * First disable the doorbell, then tell the GuC we've | ||
667 | * finished with it, finally deallocate it in our bitmap | ||
668 | */ | ||
669 | guc_disable_doorbell(guc, client); | ||
670 | host2guc_release_doorbell(guc, client); | ||
671 | release_doorbell(guc, client->doorbell_id); | ||
672 | } | ||
673 | |||
674 | /* | 635 | /* |
675 | * XXX: wait for any outstanding submissions before freeing memory. | 636 | * XXX: wait for any outstanding submissions before freeing memory. |
676 | * Be sure to drop any locks | 637 | * Be sure to drop any locks |
677 | */ | 638 | */ |
678 | 639 | ||
640 | if (client->client_base) { | ||
641 | /* | ||
642 | * If we got as far as setting up a doorbell, make sure | ||
643 | * we shut it down before unmapping & deallocating the | ||
644 | * memory. So first disable the doorbell, then tell the | ||
645 | * GuC that we've finished with it, finally deallocate | ||
646 | * it in our bitmap | ||
647 | */ | ||
648 | if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) { | ||
649 | guc_disable_doorbell(guc, client); | ||
650 | host2guc_release_doorbell(guc, client); | ||
651 | release_doorbell(guc, client->doorbell_id); | ||
652 | } | ||
653 | |||
654 | kunmap(kmap_to_page(client->client_base)); | ||
655 | } | ||
656 | |||
679 | gem_release_guc_obj(client->client_obj); | 657 | gem_release_guc_obj(client->client_obj); |
680 | 658 | ||
681 | if (client->ctx_index != GUC_INVALID_CTX_ID) { | 659 | if (client->ctx_index != GUC_INVALID_CTX_ID) { |
@@ -696,7 +674,7 @@ static void guc_client_free(struct drm_device *dev, | |||
696 | * @ctx: the context that owns the client (we use the default render | 674 | * @ctx: the context that owns the client (we use the default render |
697 | * context) | 675 | * context) |
698 | * | 676 | * |
699 | * Return: An i915_guc_client object if success. | 677 | * Return: An i915_guc_client object if success, else NULL. |
700 | */ | 678 | */ |
701 | static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, | 679 | static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, |
702 | uint32_t priority, | 680 | uint32_t priority, |
@@ -728,7 +706,9 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, | |||
728 | if (!obj) | 706 | if (!obj) |
729 | goto err; | 707 | goto err; |
730 | 708 | ||
709 | /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */ | ||
731 | client->client_obj = obj; | 710 | client->client_obj = obj; |
711 | client->client_base = kmap(i915_gem_object_get_page(obj, 0)); | ||
732 | client->wq_offset = GUC_DB_SIZE; | 712 | client->wq_offset = GUC_DB_SIZE; |
733 | client->wq_size = GUC_WQ_SIZE; | 713 | client->wq_size = GUC_WQ_SIZE; |
734 | 714 | ||
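The GuC changes above replace per-access kmap_atomic()/kunmap_atomic() pairs with one long-lived kmap() of the first page of the client object, stored in client->client_base; the doorbell and process descriptor then become plain pointer arithmetic. The lifetime pattern, sketched from the allocation and free paths in the hunks (kmap_to_page() recovers the backing page for the matching kunmap()):

	/* At allocation: map the doorbell/process-descriptor page once. */
	client->client_base = kmap(i915_gem_object_get_page(obj, 0));

	/* Any later access is simple pointer arithmetic, no atomic mapping. */
	desc = client->client_base + client->proc_desc_offset;
	doorbell = client->client_base + client->doorbell_offset;

	/* At teardown: unmap via the page backing the kernel address. */
	if (client->client_base)
		kunmap(kmap_to_page(client->client_base));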
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 679f08c944ef..2f6fd33c07ba 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1264,18 +1264,17 @@ out: | |||
1264 | mutex_unlock(&dev_priv->dev->struct_mutex); | 1264 | mutex_unlock(&dev_priv->dev->struct_mutex); |
1265 | } | 1265 | } |
1266 | 1266 | ||
1267 | static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) | 1267 | static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, |
1268 | u32 iir) | ||
1268 | { | 1269 | { |
1269 | struct drm_i915_private *dev_priv = dev->dev_private; | 1270 | if (!HAS_L3_DPF(dev_priv)) |
1270 | |||
1271 | if (!HAS_L3_DPF(dev)) | ||
1272 | return; | 1271 | return; |
1273 | 1272 | ||
1274 | spin_lock(&dev_priv->irq_lock); | 1273 | spin_lock(&dev_priv->irq_lock); |
1275 | gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); | 1274 | gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); |
1276 | spin_unlock(&dev_priv->irq_lock); | 1275 | spin_unlock(&dev_priv->irq_lock); |
1277 | 1276 | ||
1278 | iir &= GT_PARITY_ERROR(dev); | 1277 | iir &= GT_PARITY_ERROR(dev_priv); |
1279 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) | 1278 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) |
1280 | dev_priv->l3_parity.which_slice |= 1 << 1; | 1279 | dev_priv->l3_parity.which_slice |= 1 << 1; |
1281 | 1280 | ||
@@ -1285,8 +1284,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) | |||
1285 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); | 1284 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); |
1286 | } | 1285 | } |
1287 | 1286 | ||
1288 | static void ilk_gt_irq_handler(struct drm_device *dev, | 1287 | static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, |
1289 | struct drm_i915_private *dev_priv, | ||
1290 | u32 gt_iir) | 1288 | u32 gt_iir) |
1291 | { | 1289 | { |
1292 | if (gt_iir & | 1290 | if (gt_iir & |
@@ -1296,8 +1294,7 @@ static void ilk_gt_irq_handler(struct drm_device *dev, | |||
1296 | notify_ring(&dev_priv->engine[VCS]); | 1294 | notify_ring(&dev_priv->engine[VCS]); |
1297 | } | 1295 | } |
1298 | 1296 | ||
1299 | static void snb_gt_irq_handler(struct drm_device *dev, | 1297 | static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, |
1300 | struct drm_i915_private *dev_priv, | ||
1301 | u32 gt_iir) | 1298 | u32 gt_iir) |
1302 | { | 1299 | { |
1303 | 1300 | ||
@@ -1314,8 +1311,8 @@ static void snb_gt_irq_handler(struct drm_device *dev, | |||
1314 | GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) | 1311 | GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) |
1315 | DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); | 1312 | DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); |
1316 | 1313 | ||
1317 | if (gt_iir & GT_PARITY_ERROR(dev)) | 1314 | if (gt_iir & GT_PARITY_ERROR(dev_priv)) |
1318 | ivybridge_parity_error_irq_handler(dev, gt_iir); | 1315 | ivybridge_parity_error_irq_handler(dev_priv, gt_iir); |
1319 | } | 1316 | } |
1320 | 1317 | ||
1321 | static __always_inline void | 1318 | static __always_inline void |
@@ -1327,60 +1324,45 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) | |||
1327 | tasklet_schedule(&engine->irq_tasklet); | 1324 | tasklet_schedule(&engine->irq_tasklet); |
1328 | } | 1325 | } |
1329 | 1326 | ||
1330 | static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, | 1327 | static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, |
1331 | u32 master_ctl) | 1328 | u32 master_ctl, |
1329 | u32 gt_iir[4]) | ||
1332 | { | 1330 | { |
1333 | irqreturn_t ret = IRQ_NONE; | 1331 | irqreturn_t ret = IRQ_NONE; |
1334 | 1332 | ||
1335 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { | 1333 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { |
1336 | u32 iir = I915_READ_FW(GEN8_GT_IIR(0)); | 1334 | gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); |
1337 | if (iir) { | 1335 | if (gt_iir[0]) { |
1338 | I915_WRITE_FW(GEN8_GT_IIR(0), iir); | 1336 | I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); |
1339 | ret = IRQ_HANDLED; | 1337 | ret = IRQ_HANDLED; |
1340 | |||
1341 | gen8_cs_irq_handler(&dev_priv->engine[RCS], | ||
1342 | iir, GEN8_RCS_IRQ_SHIFT); | ||
1343 | |||
1344 | gen8_cs_irq_handler(&dev_priv->engine[BCS], | ||
1345 | iir, GEN8_BCS_IRQ_SHIFT); | ||
1346 | } else | 1338 | } else |
1347 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); | 1339 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); |
1348 | } | 1340 | } |
1349 | 1341 | ||
1350 | if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { | 1342 | if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { |
1351 | u32 iir = I915_READ_FW(GEN8_GT_IIR(1)); | 1343 | gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); |
1352 | if (iir) { | 1344 | if (gt_iir[1]) { |
1353 | I915_WRITE_FW(GEN8_GT_IIR(1), iir); | 1345 | I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); |
1354 | ret = IRQ_HANDLED; | 1346 | ret = IRQ_HANDLED; |
1355 | |||
1356 | gen8_cs_irq_handler(&dev_priv->engine[VCS], | ||
1357 | iir, GEN8_VCS1_IRQ_SHIFT); | ||
1358 | |||
1359 | gen8_cs_irq_handler(&dev_priv->engine[VCS2], | ||
1360 | iir, GEN8_VCS2_IRQ_SHIFT); | ||
1361 | } else | 1347 | } else |
1362 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); | 1348 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); |
1363 | } | 1349 | } |
1364 | 1350 | ||
1365 | if (master_ctl & GEN8_GT_VECS_IRQ) { | 1351 | if (master_ctl & GEN8_GT_VECS_IRQ) { |
1366 | u32 iir = I915_READ_FW(GEN8_GT_IIR(3)); | 1352 | gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); |
1367 | if (iir) { | 1353 | if (gt_iir[3]) { |
1368 | I915_WRITE_FW(GEN8_GT_IIR(3), iir); | 1354 | I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); |
1369 | ret = IRQ_HANDLED; | 1355 | ret = IRQ_HANDLED; |
1370 | |||
1371 | gen8_cs_irq_handler(&dev_priv->engine[VECS], | ||
1372 | iir, GEN8_VECS_IRQ_SHIFT); | ||
1373 | } else | 1356 | } else |
1374 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); | 1357 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); |
1375 | } | 1358 | } |
1376 | 1359 | ||
1377 | if (master_ctl & GEN8_GT_PM_IRQ) { | 1360 | if (master_ctl & GEN8_GT_PM_IRQ) { |
1378 | u32 iir = I915_READ_FW(GEN8_GT_IIR(2)); | 1361 | gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); |
1379 | if (iir & dev_priv->pm_rps_events) { | 1362 | if (gt_iir[2] & dev_priv->pm_rps_events) { |
1380 | I915_WRITE_FW(GEN8_GT_IIR(2), | 1363 | I915_WRITE_FW(GEN8_GT_IIR(2), |
1381 | iir & dev_priv->pm_rps_events); | 1364 | gt_iir[2] & dev_priv->pm_rps_events); |
1382 | ret = IRQ_HANDLED; | 1365 | ret = IRQ_HANDLED; |
1383 | gen6_rps_irq_handler(dev_priv, iir); | ||
1384 | } else | 1366 | } else |
1385 | DRM_ERROR("The master control interrupt lied (PM)!\n"); | 1367 | DRM_ERROR("The master control interrupt lied (PM)!\n"); |
1386 | } | 1368 | } |
@@ -1388,6 +1370,31 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, | |||
1388 | return ret; | 1370 | return ret; |
1389 | } | 1371 | } |
1390 | 1372 | ||
1373 | static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, | ||
1374 | u32 gt_iir[4]) | ||
1375 | { | ||
1376 | if (gt_iir[0]) { | ||
1377 | gen8_cs_irq_handler(&dev_priv->engine[RCS], | ||
1378 | gt_iir[0], GEN8_RCS_IRQ_SHIFT); | ||
1379 | gen8_cs_irq_handler(&dev_priv->engine[BCS], | ||
1380 | gt_iir[0], GEN8_BCS_IRQ_SHIFT); | ||
1381 | } | ||
1382 | |||
1383 | if (gt_iir[1]) { | ||
1384 | gen8_cs_irq_handler(&dev_priv->engine[VCS], | ||
1385 | gt_iir[1], GEN8_VCS1_IRQ_SHIFT); | ||
1386 | gen8_cs_irq_handler(&dev_priv->engine[VCS2], | ||
1387 | gt_iir[1], GEN8_VCS2_IRQ_SHIFT); | ||
1388 | } | ||
1389 | |||
1390 | if (gt_iir[3]) | ||
1391 | gen8_cs_irq_handler(&dev_priv->engine[VECS], | ||
1392 | gt_iir[3], GEN8_VECS_IRQ_SHIFT); | ||
1393 | |||
1394 | if (gt_iir[2] & dev_priv->pm_rps_events) | ||
1395 | gen6_rps_irq_handler(dev_priv, gt_iir[2]); | ||
1396 | } | ||
1397 | |||
1391 | static bool bxt_port_hotplug_long_detect(enum port port, u32 val) | 1398 | static bool bxt_port_hotplug_long_detect(enum port port, u32 val) |
1392 | { | 1399 | { |
1393 | switch (port) { | 1400 | switch (port) { |
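The gen8 GT interrupt path is split above into an "ack" phase, which latches and clears the hardware IIR registers into a small array, and a "handle" phase, which dispatches from that array afterwards. The shape of the split, reduced to its essentials (engine dispatch details omitted; GT1/GT2/GT3 follow the GT0 pattern):

	u32 gt_iir[4] = {};

	/* Phase 1: ack - read and clear the hardware IIRs as early as possible. */
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0])
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
	}
	/* ... likewise for GEN8_GT_IIR(1), (2) and (3) ... */

	/* Phase 2: handle - dispatch from the saved copies; hardware is already clear. */
	gen8_gt_irq_handler(dev_priv, gt_iir);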
@@ -1644,10 +1651,10 @@ static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) | |||
1644 | return true; | 1651 | return true; |
1645 | } | 1652 | } |
1646 | 1653 | ||
1647 | static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) | 1654 | static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, |
1655 | u32 pipe_stats[I915_MAX_PIPES]) | ||
1648 | { | 1656 | { |
1649 | struct drm_i915_private *dev_priv = dev->dev_private; | 1657 | struct drm_i915_private *dev_priv = dev->dev_private; |
1650 | u32 pipe_stats[I915_MAX_PIPES] = { }; | ||
1651 | int pipe; | 1658 | int pipe; |
1652 | 1659 | ||
1653 | spin_lock(&dev_priv->irq_lock); | 1660 | spin_lock(&dev_priv->irq_lock); |
@@ -1701,6 +1708,13 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) | |||
1701 | I915_WRITE(reg, pipe_stats[pipe]); | 1708 | I915_WRITE(reg, pipe_stats[pipe]); |
1702 | } | 1709 | } |
1703 | spin_unlock(&dev_priv->irq_lock); | 1710 | spin_unlock(&dev_priv->irq_lock); |
1711 | } | ||
1712 | |||
1713 | static void valleyview_pipestat_irq_handler(struct drm_device *dev, | ||
1714 | u32 pipe_stats[I915_MAX_PIPES]) | ||
1715 | { | ||
1716 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
1717 | enum pipe pipe; | ||
1704 | 1718 | ||
1705 | for_each_pipe(dev_priv, pipe) { | 1719 | for_each_pipe(dev_priv, pipe) { |
1706 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && | 1720 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
@@ -1723,21 +1737,20 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) | |||
1723 | gmbus_irq_handler(dev); | 1737 | gmbus_irq_handler(dev); |
1724 | } | 1738 | } |
1725 | 1739 | ||
1726 | static void i9xx_hpd_irq_handler(struct drm_device *dev) | 1740 | static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) |
1727 | { | 1741 | { |
1728 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1729 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 1742 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
1730 | u32 pin_mask = 0, long_mask = 0; | ||
1731 | 1743 | ||
1732 | if (!hotplug_status) | 1744 | if (hotplug_status) |
1733 | return; | 1745 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
1734 | 1746 | ||
1735 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 1747 | return hotplug_status; |
1736 | /* | 1748 | } |
1737 | * Make sure hotplug status is cleared before we clear IIR, or else we | 1749 | |
1738 | * may miss hotplug events. | 1750 | static void i9xx_hpd_irq_handler(struct drm_device *dev, |
1739 | */ | 1751 | u32 hotplug_status) |
1740 | POSTING_READ(PORT_HOTPLUG_STAT); | 1752 | { |
1753 | u32 pin_mask = 0, long_mask = 0; | ||
1741 | 1754 | ||
1742 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { | 1755 | if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
1743 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; | 1756 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; |
@@ -1768,7 +1781,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
1768 | { | 1781 | { |
1769 | struct drm_device *dev = arg; | 1782 | struct drm_device *dev = arg; |
1770 | struct drm_i915_private *dev_priv = dev->dev_private; | 1783 | struct drm_i915_private *dev_priv = dev->dev_private; |
1771 | u32 iir, gt_iir, pm_iir; | ||
1772 | irqreturn_t ret = IRQ_NONE; | 1784 | irqreturn_t ret = IRQ_NONE; |
1773 | 1785 | ||
1774 | if (!intel_irqs_enabled(dev_priv)) | 1786 | if (!intel_irqs_enabled(dev_priv)) |
@@ -1777,40 +1789,72 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
1777 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ | 1789 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */ |
1778 | disable_rpm_wakeref_asserts(dev_priv); | 1790 | disable_rpm_wakeref_asserts(dev_priv); |
1779 | 1791 | ||
1780 | while (true) { | 1792 | do { |
1781 | /* Find, clear, then process each source of interrupt */ | 1793 | u32 iir, gt_iir, pm_iir; |
1794 | u32 pipe_stats[I915_MAX_PIPES] = {}; | ||
1795 | u32 hotplug_status = 0; | ||
1796 | u32 ier = 0; | ||
1782 | 1797 | ||
1783 | gt_iir = I915_READ(GTIIR); | 1798 | gt_iir = I915_READ(GTIIR); |
1784 | if (gt_iir) | ||
1785 | I915_WRITE(GTIIR, gt_iir); | ||
1786 | |||
1787 | pm_iir = I915_READ(GEN6_PMIIR); | 1799 | pm_iir = I915_READ(GEN6_PMIIR); |
1788 | if (pm_iir) | ||
1789 | I915_WRITE(GEN6_PMIIR, pm_iir); | ||
1790 | |||
1791 | iir = I915_READ(VLV_IIR); | 1800 | iir = I915_READ(VLV_IIR); |
1792 | if (iir) { | ||
1793 | /* Consume port before clearing IIR or we'll miss events */ | ||
1794 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | ||
1795 | i9xx_hpd_irq_handler(dev); | ||
1796 | I915_WRITE(VLV_IIR, iir); | ||
1797 | } | ||
1798 | 1801 | ||
1799 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) | 1802 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) |
1800 | goto out; | 1803 | break; |
1801 | 1804 | ||
1802 | ret = IRQ_HANDLED; | 1805 | ret = IRQ_HANDLED; |
1803 | 1806 | ||
1807 | /* | ||
1808 | * Theory on interrupt generation, based on empirical evidence: | ||
1809 | * | ||
1810 | * x = ((VLV_IIR & VLV_IER) || | ||
1811 | * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && | ||
1812 | * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); | ||
1813 | * | ||
1814 | * A CPU interrupt will only be raised when 'x' has a 0->1 edge. | ||
1815 | * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to | ||
1816 | * guarantee the CPU interrupt will be raised again even if we | ||
1817 | * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR | ||
1818 | * bits this time around. | ||
1819 | */ | ||
1820 | I915_WRITE(VLV_MASTER_IER, 0); | ||
1821 | ier = I915_READ(VLV_IER); | ||
1822 | I915_WRITE(VLV_IER, 0); | ||
1823 | |||
1804 | if (gt_iir) | 1824 | if (gt_iir) |
1805 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | 1825 | I915_WRITE(GTIIR, gt_iir); |
1806 | if (pm_iir) | 1826 | if (pm_iir) |
1807 | gen6_rps_irq_handler(dev_priv, pm_iir); | 1827 | I915_WRITE(GEN6_PMIIR, pm_iir); |
1828 | |||
1829 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | ||
1830 | hotplug_status = i9xx_hpd_irq_ack(dev_priv); | ||
1831 | |||
1808 | /* Call regardless, as some status bits might not be | 1832 | /* Call regardless, as some status bits might not be |
1809 | * signalled in iir */ | 1833 | * signalled in iir */ |
1810 | valleyview_pipestat_irq_handler(dev, iir); | 1834 | valleyview_pipestat_irq_ack(dev, iir, pipe_stats); |
1811 | } | 1835 | |
1836 | /* | ||
1837 | * VLV_IIR is single buffered, and reflects the level | ||
1838 | * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. | ||
1839 | */ | ||
1840 | if (iir) | ||
1841 | I915_WRITE(VLV_IIR, iir); | ||
1842 | |||
1843 | I915_WRITE(VLV_IER, ier); | ||
1844 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | ||
1845 | POSTING_READ(VLV_MASTER_IER); | ||
1846 | |||
1847 | if (gt_iir) | ||
1848 | snb_gt_irq_handler(dev_priv, gt_iir); | ||
1849 | if (pm_iir) | ||
1850 | gen6_rps_irq_handler(dev_priv, pm_iir); | ||
1851 | |||
1852 | if (hotplug_status) | ||
1853 | i9xx_hpd_irq_handler(dev, hotplug_status); | ||
1854 | |||
1855 | valleyview_pipestat_irq_handler(dev, pipe_stats); | ||
1856 | } while (0); | ||
1812 | 1857 | ||
1813 | out: | ||
1814 | enable_rpm_wakeref_asserts(dev_priv); | 1858 | enable_rpm_wakeref_asserts(dev_priv); |
1815 | 1859 | ||
1816 | return ret; | 1860 | return ret; |
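The reworked valleyview handler follows the ordering spelled out in the "theory on interrupt generation" comment above: the master enable and VLV_IER are dropped first so that restoring them guarantees a fresh 0->1 edge, the individual IIRs are acked (VLV_IIR last, since it is level-driven from PIPESTAT/PORT_HOTPLUG_STAT), and only then are the saved bits processed. A skeleton of one loop iteration, with register names as in the hunk:

	/* Drop the master enable and IER so re-enabling produces a new edge. */
	I915_WRITE(VLV_MASTER_IER, 0);
	ier = I915_READ(VLV_IER);
	I915_WRITE(VLV_IER, 0);

	/* Ack the sources; VLV_IIR is cleared last. */
	if (gt_iir)
		I915_WRITE(GTIIR, gt_iir);
	if (pm_iir)
		I915_WRITE(GEN6_PMIIR, pm_iir);
	if (iir)
		I915_WRITE(VLV_IIR, iir);

	/* Restore the enables, then process the saved copies. */
	I915_WRITE(VLV_IER, ier);
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);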
@@ -1820,7 +1864,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
1820 | { | 1864 | { |
1821 | struct drm_device *dev = arg; | 1865 | struct drm_device *dev = arg; |
1822 | struct drm_i915_private *dev_priv = dev->dev_private; | 1866 | struct drm_i915_private *dev_priv = dev->dev_private; |
1823 | u32 master_ctl, iir; | ||
1824 | irqreturn_t ret = IRQ_NONE; | 1867 | irqreturn_t ret = IRQ_NONE; |
1825 | 1868 | ||
1826 | if (!intel_irqs_enabled(dev_priv)) | 1869 | if (!intel_irqs_enabled(dev_priv)) |
@@ -1830,6 +1873,12 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
1830 | disable_rpm_wakeref_asserts(dev_priv); | 1873 | disable_rpm_wakeref_asserts(dev_priv); |
1831 | 1874 | ||
1832 | do { | 1875 | do { |
1876 | u32 master_ctl, iir; | ||
1877 | u32 gt_iir[4] = {}; | ||
1878 | u32 pipe_stats[I915_MAX_PIPES] = {}; | ||
1879 | u32 hotplug_status = 0; | ||
1880 | u32 ier = 0; | ||
1881 | |||
1833 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; | 1882 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; |
1834 | iir = I915_READ(VLV_IIR); | 1883 | iir = I915_READ(VLV_IIR); |
1835 | 1884 | ||
@@ -1838,25 +1887,49 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
1838 | 1887 | ||
1839 | ret = IRQ_HANDLED; | 1888 | ret = IRQ_HANDLED; |
1840 | 1889 | ||
1890 | /* | ||
1891 | * Theory on interrupt generation, based on empirical evidence: | ||
1892 | * | ||
1893 | * x = ((VLV_IIR & VLV_IER) || | ||
1894 | * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && | ||
1895 | * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); | ||
1896 | * | ||
1897 | * A CPU interrupt will only be raised when 'x' has a 0->1 edge. | ||
1898 | * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to | ||
1899 | * guarantee the CPU interrupt will be raised again even if we | ||
1900 | * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL | ||
1901 | * bits this time around. | ||
1902 | */ | ||
1841 | I915_WRITE(GEN8_MASTER_IRQ, 0); | 1903 | I915_WRITE(GEN8_MASTER_IRQ, 0); |
1904 | ier = I915_READ(VLV_IER); | ||
1905 | I915_WRITE(VLV_IER, 0); | ||
1842 | 1906 | ||
1843 | /* Find, clear, then process each source of interrupt */ | 1907 | gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); |
1844 | 1908 | ||
1845 | if (iir) { | 1909 | if (iir & I915_DISPLAY_PORT_INTERRUPT) |
1846 | /* Consume port before clearing IIR or we'll miss events */ | 1910 | hotplug_status = i9xx_hpd_irq_ack(dev_priv); |
1847 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | ||
1848 | i9xx_hpd_irq_handler(dev); | ||
1849 | I915_WRITE(VLV_IIR, iir); | ||
1850 | } | ||
1851 | |||
1852 | gen8_gt_irq_handler(dev_priv, master_ctl); | ||
1853 | 1911 | ||
1854 | /* Call regardless, as some status bits might not be | 1912 | /* Call regardless, as some status bits might not be |
1855 | * signalled in iir */ | 1913 | * signalled in iir */ |
1856 | valleyview_pipestat_irq_handler(dev, iir); | 1914 | valleyview_pipestat_irq_ack(dev, iir, pipe_stats); |
1857 | 1915 | ||
1858 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); | 1916 | /* |
1917 | * VLV_IIR is single buffered, and reflects the level | ||
1918 | * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. | ||
1919 | */ | ||
1920 | if (iir) | ||
1921 | I915_WRITE(VLV_IIR, iir); | ||
1922 | |||
1923 | I915_WRITE(VLV_IER, ier); | ||
1924 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | ||
1859 | POSTING_READ(GEN8_MASTER_IRQ); | 1925 | POSTING_READ(GEN8_MASTER_IRQ); |
1926 | |||
1927 | gen8_gt_irq_handler(dev_priv, gt_iir); | ||
1928 | |||
1929 | if (hotplug_status) | ||
1930 | i9xx_hpd_irq_handler(dev, hotplug_status); | ||
1931 | |||
1932 | valleyview_pipestat_irq_handler(dev, pipe_stats); | ||
1860 | } while (0); | 1933 | } while (0); |
1861 | 1934 | ||
1862 | enable_rpm_wakeref_asserts(dev_priv); | 1935 | enable_rpm_wakeref_asserts(dev_priv); |
@@ -2217,9 +2290,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2217 | I915_WRITE(GTIIR, gt_iir); | 2290 | I915_WRITE(GTIIR, gt_iir); |
2218 | ret = IRQ_HANDLED; | 2291 | ret = IRQ_HANDLED; |
2219 | if (INTEL_INFO(dev)->gen >= 6) | 2292 | if (INTEL_INFO(dev)->gen >= 6) |
2220 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | 2293 | snb_gt_irq_handler(dev_priv, gt_iir); |
2221 | else | 2294 | else |
2222 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | 2295 | ilk_gt_irq_handler(dev_priv, gt_iir); |
2223 | } | 2296 | } |
2224 | 2297 | ||
2225 | de_iir = I915_READ(DEIIR); | 2298 | de_iir = I915_READ(DEIIR); |
@@ -2419,6 +2492,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2419 | struct drm_device *dev = arg; | 2492 | struct drm_device *dev = arg; |
2420 | struct drm_i915_private *dev_priv = dev->dev_private; | 2493 | struct drm_i915_private *dev_priv = dev->dev_private; |
2421 | u32 master_ctl; | 2494 | u32 master_ctl; |
2495 | u32 gt_iir[4] = {}; | ||
2422 | irqreturn_t ret; | 2496 | irqreturn_t ret; |
2423 | 2497 | ||
2424 | if (!intel_irqs_enabled(dev_priv)) | 2498 | if (!intel_irqs_enabled(dev_priv)) |
@@ -2435,7 +2509,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2435 | disable_rpm_wakeref_asserts(dev_priv); | 2509 | disable_rpm_wakeref_asserts(dev_priv); |
2436 | 2510 | ||
2437 | /* Find, clear, then process each source of interrupt */ | 2511 | /* Find, clear, then process each source of interrupt */ |
2438 | ret = gen8_gt_irq_handler(dev_priv, master_ctl); | 2512 | ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); |
2513 | gen8_gt_irq_handler(dev_priv, gt_iir); | ||
2439 | ret |= gen8_de_irq_handler(dev_priv, master_ctl); | 2514 | ret |= gen8_de_irq_handler(dev_priv, master_ctl); |
2440 | 2515 | ||
2441 | I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | 2516 | I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
@@ -2483,7 +2558,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, | |||
2483 | static void i915_reset_and_wakeup(struct drm_device *dev) | 2558 | static void i915_reset_and_wakeup(struct drm_device *dev) |
2484 | { | 2559 | { |
2485 | struct drm_i915_private *dev_priv = to_i915(dev); | 2560 | struct drm_i915_private *dev_priv = to_i915(dev); |
2486 | struct i915_gpu_error *error = &dev_priv->gpu_error; | ||
2487 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; | 2561 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; |
2488 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; | 2562 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; |
2489 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; | 2563 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; |
@@ -2501,7 +2575,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) | |||
2501 | * the reset in-progress bit is only ever set by code outside of this | 2575 | * the reset in-progress bit is only ever set by code outside of this |
2502 | * work we don't need to worry about any other races. | 2576 | * work we don't need to worry about any other races. |
2503 | */ | 2577 | */ |
2504 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { | 2578 | if (i915_reset_in_progress(&dev_priv->gpu_error)) { |
2505 | DRM_DEBUG_DRIVER("resetting chip\n"); | 2579 | DRM_DEBUG_DRIVER("resetting chip\n"); |
2506 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, | 2580 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, |
2507 | reset_event); | 2581 | reset_event); |
@@ -2529,25 +2603,9 @@ static void i915_reset_and_wakeup(struct drm_device *dev) | |||
2529 | 2603 | ||
2530 | intel_runtime_pm_put(dev_priv); | 2604 | intel_runtime_pm_put(dev_priv); |
2531 | 2605 | ||
2532 | if (ret == 0) { | 2606 | if (ret == 0) |
2533 | /* | ||
2534 | * After all the gem state is reset, increment the reset | ||
2535 | * counter and wake up everyone waiting for the reset to | ||
2536 | * complete. | ||
2537 | * | ||
2538 | * Since unlock operations are a one-sided barrier only, | ||
2539 | * we need to insert a barrier here to order any seqno | ||
2540 | * updates before | ||
2541 | * the counter increment. | ||
2542 | */ | ||
2543 | smp_mb__before_atomic(); | ||
2544 | atomic_inc(&dev_priv->gpu_error.reset_counter); | ||
2545 | |||
2546 | kobject_uevent_env(&dev->primary->kdev->kobj, | 2607 | kobject_uevent_env(&dev->primary->kdev->kobj, |
2547 | KOBJ_CHANGE, reset_done_event); | 2608 | KOBJ_CHANGE, reset_done_event); |
2548 | } else { | ||
2549 | atomic_or(I915_WEDGED, &error->reset_counter); | ||
2550 | } | ||
2551 | 2609 | ||
2552 | /* | 2610 | /* |
2553 | * Note: The wake_up also serves as a memory barrier so that | 2611 | * Note: The wake_up also serves as a memory barrier so that |
@@ -3285,6 +3343,55 @@ static void gen5_gt_irq_reset(struct drm_device *dev) | |||
3285 | GEN5_IRQ_RESET(GEN6_PM); | 3343 | GEN5_IRQ_RESET(GEN6_PM); |
3286 | } | 3344 | } |
3287 | 3345 | ||
3346 | static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) | ||
3347 | { | ||
3348 | enum pipe pipe; | ||
3349 | |||
3350 | if (IS_CHERRYVIEW(dev_priv)) | ||
3351 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); | ||
3352 | else | ||
3353 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | ||
3354 | |||
3355 | i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); | ||
3356 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | ||
3357 | |||
3358 | for_each_pipe(dev_priv, pipe) { | ||
3359 | I915_WRITE(PIPESTAT(pipe), | ||
3360 | PIPE_FIFO_UNDERRUN_STATUS | | ||
3361 | PIPESTAT_INT_STATUS_MASK); | ||
3362 | dev_priv->pipestat_irq_mask[pipe] = 0; | ||
3363 | } | ||
3364 | |||
3365 | GEN5_IRQ_RESET(VLV_); | ||
3366 | dev_priv->irq_mask = ~0; | ||
3367 | } | ||
3368 | |||
3369 | static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) | ||
3370 | { | ||
3371 | u32 pipestat_mask; | ||
3372 | u32 enable_mask; | ||
3373 | enum pipe pipe; | ||
3374 | |||
3375 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | ||
3376 | PIPE_CRC_DONE_INTERRUPT_STATUS; | ||
3377 | |||
3378 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); | ||
3379 | for_each_pipe(dev_priv, pipe) | ||
3380 | i915_enable_pipestat(dev_priv, pipe, pipestat_mask); | ||
3381 | |||
3382 | enable_mask = I915_DISPLAY_PORT_INTERRUPT | | ||
3383 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | ||
3384 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | ||
3385 | if (IS_CHERRYVIEW(dev_priv)) | ||
3386 | enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | ||
3387 | |||
3388 | WARN_ON(dev_priv->irq_mask != ~0); | ||
3389 | |||
3390 | dev_priv->irq_mask = ~enable_mask; | ||
3391 | |||
3392 | GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); | ||
3393 | } | ||
3394 | |||
3288 | /* drm_dma.h hooks | 3395 | /* drm_dma.h hooks |
3289 | */ | 3396 | */ |
3290 | static void ironlake_irq_reset(struct drm_device *dev) | 3397 | static void ironlake_irq_reset(struct drm_device *dev) |
@@ -3302,34 +3409,19 @@ static void ironlake_irq_reset(struct drm_device *dev) | |||
3302 | ibx_irq_reset(dev); | 3409 | ibx_irq_reset(dev); |
3303 | } | 3410 | } |
3304 | 3411 | ||
3305 | static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) | ||
3306 | { | ||
3307 | enum pipe pipe; | ||
3308 | |||
3309 | i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0); | ||
3310 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | ||
3311 | |||
3312 | for_each_pipe(dev_priv, pipe) | ||
3313 | I915_WRITE(PIPESTAT(pipe), 0xffff); | ||
3314 | |||
3315 | GEN5_IRQ_RESET(VLV_); | ||
3316 | } | ||
3317 | |||
3318 | static void valleyview_irq_preinstall(struct drm_device *dev) | 3412 | static void valleyview_irq_preinstall(struct drm_device *dev) |
3319 | { | 3413 | { |
3320 | struct drm_i915_private *dev_priv = dev->dev_private; | 3414 | struct drm_i915_private *dev_priv = dev->dev_private; |
3321 | 3415 | ||
3322 | /* VLV magic */ | 3416 | I915_WRITE(VLV_MASTER_IER, 0); |
3323 | I915_WRITE(VLV_IMR, 0); | 3417 | POSTING_READ(VLV_MASTER_IER); |
3324 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); | ||
3325 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); | ||
3326 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); | ||
3327 | 3418 | ||
3328 | gen5_gt_irq_reset(dev); | 3419 | gen5_gt_irq_reset(dev); |
3329 | 3420 | ||
3330 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | 3421 | spin_lock_irq(&dev_priv->irq_lock); |
3331 | 3422 | if (dev_priv->display_irqs_enabled) | |
3332 | vlv_display_irq_reset(dev_priv); | 3423 | vlv_display_irq_reset(dev_priv); |
3424 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3333 | } | 3425 | } |
3334 | 3426 | ||
3335 | static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) | 3427 | static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) |
@@ -3402,9 +3494,10 @@ static void cherryview_irq_preinstall(struct drm_device *dev) | |||
3402 | 3494 | ||
3403 | GEN5_IRQ_RESET(GEN8_PCU_); | 3495 | GEN5_IRQ_RESET(GEN8_PCU_); |
3404 | 3496 | ||
3405 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); | 3497 | spin_lock_irq(&dev_priv->irq_lock); |
3406 | 3498 | if (dev_priv->display_irqs_enabled) | |
3407 | vlv_display_irq_reset(dev_priv); | 3499 | vlv_display_irq_reset(dev_priv); |
3500 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3408 | } | 3501 | } |
3409 | 3502 | ||
3410 | static u32 intel_hpd_enabled_irqs(struct drm_device *dev, | 3503 | static u32 intel_hpd_enabled_irqs(struct drm_device *dev, |
@@ -3651,74 +3744,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
3651 | return 0; | 3744 | return 0; |
3652 | } | 3745 | } |
3653 | 3746 | ||
3654 | static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) | ||
3655 | { | ||
3656 | u32 pipestat_mask; | ||
3657 | u32 iir_mask; | ||
3658 | enum pipe pipe; | ||
3659 | |||
3660 | pipestat_mask = PIPESTAT_INT_STATUS_MASK | | ||
3661 | PIPE_FIFO_UNDERRUN_STATUS; | ||
3662 | |||
3663 | for_each_pipe(dev_priv, pipe) | ||
3664 | I915_WRITE(PIPESTAT(pipe), pipestat_mask); | ||
3665 | POSTING_READ(PIPESTAT(PIPE_A)); | ||
3666 | |||
3667 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | ||
3668 | PIPE_CRC_DONE_INTERRUPT_STATUS; | ||
3669 | |||
3670 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); | ||
3671 | for_each_pipe(dev_priv, pipe) | ||
3672 | i915_enable_pipestat(dev_priv, pipe, pipestat_mask); | ||
3673 | |||
3674 | iir_mask = I915_DISPLAY_PORT_INTERRUPT | | ||
3675 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | ||
3676 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | ||
3677 | if (IS_CHERRYVIEW(dev_priv)) | ||
3678 | iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | ||
3679 | dev_priv->irq_mask &= ~iir_mask; | ||
3680 | |||
3681 | I915_WRITE(VLV_IIR, iir_mask); | ||
3682 | I915_WRITE(VLV_IIR, iir_mask); | ||
3683 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); | ||
3684 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | ||
3685 | POSTING_READ(VLV_IMR); | ||
3686 | } | ||
3687 | |||
3688 | static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) | ||
3689 | { | ||
3690 | u32 pipestat_mask; | ||
3691 | u32 iir_mask; | ||
3692 | enum pipe pipe; | ||
3693 | |||
3694 | iir_mask = I915_DISPLAY_PORT_INTERRUPT | | ||
3695 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | ||
3696 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | ||
3697 | if (IS_CHERRYVIEW(dev_priv)) | ||
3698 | iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | ||
3699 | |||
3700 | dev_priv->irq_mask |= iir_mask; | ||
3701 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | ||
3702 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); | ||
3703 | I915_WRITE(VLV_IIR, iir_mask); | ||
3704 | I915_WRITE(VLV_IIR, iir_mask); | ||
3705 | POSTING_READ(VLV_IIR); | ||
3706 | |||
3707 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | ||
3708 | PIPE_CRC_DONE_INTERRUPT_STATUS; | ||
3709 | |||
3710 | i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); | ||
3711 | for_each_pipe(dev_priv, pipe) | ||
3712 | i915_disable_pipestat(dev_priv, pipe, pipestat_mask); | ||
3713 | |||
3714 | pipestat_mask = PIPESTAT_INT_STATUS_MASK | | ||
3715 | PIPE_FIFO_UNDERRUN_STATUS; | ||
3716 | |||
3717 | for_each_pipe(dev_priv, pipe) | ||
3718 | I915_WRITE(PIPESTAT(pipe), pipestat_mask); | ||
3719 | POSTING_READ(PIPESTAT(PIPE_A)); | ||
3720 | } | ||
3721 | |||
3722 | void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) | 3747 | void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) |
3723 | { | 3748 | { |
3724 | assert_spin_locked(&dev_priv->irq_lock); | 3749 | assert_spin_locked(&dev_priv->irq_lock); |
@@ -3728,8 +3753,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) | |||
3728 | 3753 | ||
3729 | dev_priv->display_irqs_enabled = true; | 3754 | dev_priv->display_irqs_enabled = true; |
3730 | 3755 | ||
3731 | if (intel_irqs_enabled(dev_priv)) | 3756 | if (intel_irqs_enabled(dev_priv)) { |
3732 | valleyview_display_irqs_install(dev_priv); | 3757 | vlv_display_irq_reset(dev_priv); |
3758 | vlv_display_irq_postinstall(dev_priv); | ||
3759 | } | ||
3733 | } | 3760 | } |
3734 | 3761 | ||
3735 | void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) | 3762 | void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) |
@@ -3742,45 +3769,23 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) | |||
3742 | dev_priv->display_irqs_enabled = false; | 3769 | dev_priv->display_irqs_enabled = false; |
3743 | 3770 | ||
3744 | if (intel_irqs_enabled(dev_priv)) | 3771 | if (intel_irqs_enabled(dev_priv)) |
3745 | valleyview_display_irqs_uninstall(dev_priv); | 3772 | vlv_display_irq_reset(dev_priv); |
3746 | } | 3773 | } |
3747 | 3774 | ||
3748 | static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) | ||
3749 | { | ||
3750 | dev_priv->irq_mask = ~0; | ||
3751 | |||
3752 | i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); | ||
3753 | POSTING_READ(PORT_HOTPLUG_EN); | ||
3754 | |||
3755 | I915_WRITE(VLV_IIR, 0xffffffff); | ||
3756 | I915_WRITE(VLV_IIR, 0xffffffff); | ||
3757 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); | ||
3758 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | ||
3759 | POSTING_READ(VLV_IMR); | ||
3760 | |||
3761 | /* Interrupt setup is already guaranteed to be single-threaded, this is | ||
3762 | * just to make the assert_spin_locked check happy. */ | ||
3763 | spin_lock_irq(&dev_priv->irq_lock); | ||
3764 | if (dev_priv->display_irqs_enabled) | ||
3765 | valleyview_display_irqs_install(dev_priv); | ||
3766 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3767 | } | ||
3768 | 3775 | ||
3769 | static int valleyview_irq_postinstall(struct drm_device *dev) | 3776 | static int valleyview_irq_postinstall(struct drm_device *dev) |
3770 | { | 3777 | { |
3771 | struct drm_i915_private *dev_priv = dev->dev_private; | 3778 | struct drm_i915_private *dev_priv = dev->dev_private; |
3772 | 3779 | ||
3773 | vlv_display_irq_postinstall(dev_priv); | ||
3774 | |||
3775 | gen5_gt_irq_postinstall(dev); | 3780 | gen5_gt_irq_postinstall(dev); |
3776 | 3781 | ||
3777 | /* ack & enable invalid PTE error interrupts */ | 3782 | spin_lock_irq(&dev_priv->irq_lock); |
3778 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | 3783 | if (dev_priv->display_irqs_enabled) |
3779 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | 3784 | vlv_display_irq_postinstall(dev_priv); |
3780 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); | 3785 | spin_unlock_irq(&dev_priv->irq_lock); |
3781 | #endif | ||
3782 | 3786 | ||
3783 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | 3787 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); |
3788 | POSTING_READ(VLV_MASTER_IER); | ||
3784 | 3789 | ||
3785 | return 0; | 3790 | return 0; |
3786 | } | 3791 | } |
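With the reworked postinstall above, valleyview_irq_postinstall() programs the display interrupt registers only under dev_priv->irq_lock and only while display_irqs_enabled is set, so driver load and valleyview_enable_display_irqs() converge on the same vlv_display_irq_reset()/vlv_display_irq_postinstall() pair. A minimal userspace sketch of that shape, with stand-in types and helpers rather than i915 code (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model: hardware programming happens only under the IRQ lock and only
 * while display interrupts are enabled, so the load path and the runtime
 * enable path can share one helper. */
struct toy_priv {
	pthread_mutex_t irq_lock;
	bool display_irqs_enabled;
};

static void display_irq_reset(struct toy_priv *p)       { puts("reset display IRQ registers"); }
static void display_irq_postinstall(struct toy_priv *p) { puts("program display IRQ registers"); }

static void toy_irq_postinstall(struct toy_priv *p)
{
	pthread_mutex_lock(&p->irq_lock);
	if (p->display_irqs_enabled)		/* skipped while display IRQs are runtime-disabled */
		display_irq_postinstall(p);
	pthread_mutex_unlock(&p->irq_lock);
}

static void toy_enable_display_irqs(struct toy_priv *p)
{
	pthread_mutex_lock(&p->irq_lock);
	if (!p->display_irqs_enabled) {
		p->display_irqs_enabled = true;
		display_irq_reset(p);		/* enable path: full reset, then program */
		display_irq_postinstall(p);
	}
	pthread_mutex_unlock(&p->irq_lock);
}

int main(void)
{
	struct toy_priv p = { PTHREAD_MUTEX_INITIALIZER, false };

	toy_irq_postinstall(&p);	/* no-op: display interrupts still disabled */
	toy_enable_display_irqs(&p);	/* resets and programs them */
	return 0;
}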
@@ -3791,7 +3796,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) | |||
3791 | uint32_t gt_interrupts[] = { | 3796 | uint32_t gt_interrupts[] = { |
3792 | GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | 3797 | GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | |
3793 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | 3798 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | |
3794 | GT_RENDER_L3_PARITY_ERROR_INTERRUPT | | ||
3795 | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | | 3799 | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | |
3796 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, | 3800 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, |
3797 | GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | | 3801 | GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | |
@@ -3803,6 +3807,9 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) | |||
3803 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3807 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3804 | }; | 3808 | }; |
3805 | 3809 | ||
3810 | if (HAS_L3_DPF(dev_priv)) | ||
3811 | gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | ||
3812 | |||
3806 | dev_priv->pm_irq_mask = 0xffffffff; | 3813 | dev_priv->pm_irq_mask = 0xffffffff; |
3807 | GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); | 3814 | GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); |
3808 | GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); | 3815 | GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); |
@@ -3870,7 +3877,7 @@ static int gen8_irq_postinstall(struct drm_device *dev) | |||
3870 | if (HAS_PCH_SPLIT(dev)) | 3877 | if (HAS_PCH_SPLIT(dev)) |
3871 | ibx_irq_postinstall(dev); | 3878 | ibx_irq_postinstall(dev); |
3872 | 3879 | ||
3873 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); | 3880 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
3874 | POSTING_READ(GEN8_MASTER_IRQ); | 3881 | POSTING_READ(GEN8_MASTER_IRQ); |
3875 | 3882 | ||
3876 | return 0; | 3883 | return 0; |
@@ -3880,11 +3887,14 @@ static int cherryview_irq_postinstall(struct drm_device *dev) | |||
3880 | { | 3887 | { |
3881 | struct drm_i915_private *dev_priv = dev->dev_private; | 3888 | struct drm_i915_private *dev_priv = dev->dev_private; |
3882 | 3889 | ||
3883 | vlv_display_irq_postinstall(dev_priv); | ||
3884 | |||
3885 | gen8_gt_irq_postinstall(dev_priv); | 3890 | gen8_gt_irq_postinstall(dev_priv); |
3886 | 3891 | ||
3887 | I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); | 3892 | spin_lock_irq(&dev_priv->irq_lock); |
3893 | if (dev_priv->display_irqs_enabled) | ||
3894 | vlv_display_irq_postinstall(dev_priv); | ||
3895 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3896 | |||
3897 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | ||
3888 | POSTING_READ(GEN8_MASTER_IRQ); | 3898 | POSTING_READ(GEN8_MASTER_IRQ); |
3889 | 3899 | ||
3890 | return 0; | 3900 | return 0; |
@@ -3900,20 +3910,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) | |||
3900 | gen8_irq_reset(dev); | 3910 | gen8_irq_reset(dev); |
3901 | } | 3911 | } |
3902 | 3912 | ||
3903 | static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) | ||
3904 | { | ||
3905 | /* Interrupt setup is already guaranteed to be single-threaded, this is | ||
3906 | * just to make the assert_spin_locked check happy. */ | ||
3907 | spin_lock_irq(&dev_priv->irq_lock); | ||
3908 | if (dev_priv->display_irqs_enabled) | ||
3909 | valleyview_display_irqs_uninstall(dev_priv); | ||
3910 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3911 | |||
3912 | vlv_display_irq_reset(dev_priv); | ||
3913 | |||
3914 | dev_priv->irq_mask = ~0; | ||
3915 | } | ||
3916 | |||
3917 | static void valleyview_irq_uninstall(struct drm_device *dev) | 3913 | static void valleyview_irq_uninstall(struct drm_device *dev) |
3918 | { | 3914 | { |
3919 | struct drm_i915_private *dev_priv = dev->dev_private; | 3915 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3922,12 +3918,16 @@ static void valleyview_irq_uninstall(struct drm_device *dev) | |||
3922 | return; | 3918 | return; |
3923 | 3919 | ||
3924 | I915_WRITE(VLV_MASTER_IER, 0); | 3920 | I915_WRITE(VLV_MASTER_IER, 0); |
3921 | POSTING_READ(VLV_MASTER_IER); | ||
3925 | 3922 | ||
3926 | gen5_gt_irq_reset(dev); | 3923 | gen5_gt_irq_reset(dev); |
3927 | 3924 | ||
3928 | I915_WRITE(HWSTAM, 0xffffffff); | 3925 | I915_WRITE(HWSTAM, 0xffffffff); |
3929 | 3926 | ||
3930 | vlv_display_irq_uninstall(dev_priv); | 3927 | spin_lock_irq(&dev_priv->irq_lock); |
3928 | if (dev_priv->display_irqs_enabled) | ||
3929 | vlv_display_irq_reset(dev_priv); | ||
3930 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3931 | } | 3931 | } |
3932 | 3932 | ||
3933 | static void cherryview_irq_uninstall(struct drm_device *dev) | 3933 | static void cherryview_irq_uninstall(struct drm_device *dev) |
@@ -3944,7 +3944,10 @@ static void cherryview_irq_uninstall(struct drm_device *dev) | |||
3944 | 3944 | ||
3945 | GEN5_IRQ_RESET(GEN8_PCU_); | 3945 | GEN5_IRQ_RESET(GEN8_PCU_); |
3946 | 3946 | ||
3947 | vlv_display_irq_uninstall(dev_priv); | 3947 | spin_lock_irq(&dev_priv->irq_lock); |
3948 | if (dev_priv->display_irqs_enabled) | ||
3949 | vlv_display_irq_reset(dev_priv); | ||
3950 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3948 | } | 3951 | } |
3949 | 3952 | ||
3950 | static void ironlake_irq_uninstall(struct drm_device *dev) | 3953 | static void ironlake_irq_uninstall(struct drm_device *dev) |
@@ -4271,8 +4274,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | |||
4271 | 4274 | ||
4272 | /* Consume port. Then clear IIR or we'll miss events */ | 4275 | /* Consume port. Then clear IIR or we'll miss events */ |
4273 | if (I915_HAS_HOTPLUG(dev) && | 4276 | if (I915_HAS_HOTPLUG(dev) && |
4274 | iir & I915_DISPLAY_PORT_INTERRUPT) | 4277 | iir & I915_DISPLAY_PORT_INTERRUPT) { |
4275 | i9xx_hpd_irq_handler(dev); | 4278 | u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); |
4279 | if (hotplug_status) | ||
4280 | i9xx_hpd_irq_handler(dev, hotplug_status); | ||
4281 | } | ||
4276 | 4282 | ||
4277 | I915_WRITE(IIR, iir & ~flip_mask); | 4283 | I915_WRITE(IIR, iir & ~flip_mask); |
4278 | new_iir = I915_READ(IIR); /* Flush posted writes */ | 4284 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
@@ -4501,8 +4507,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
4501 | ret = IRQ_HANDLED; | 4507 | ret = IRQ_HANDLED; |
4502 | 4508 | ||
4503 | /* Consume port. Then clear IIR or we'll miss events */ | 4509 | /* Consume port. Then clear IIR or we'll miss events */ |
4504 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | 4510 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
4505 | i9xx_hpd_irq_handler(dev); | 4511 | u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); |
4512 | if (hotplug_status) | ||
4513 | i9xx_hpd_irq_handler(dev, hotplug_status); | ||
4514 | } | ||
4506 | 4515 | ||
4507 | I915_WRITE(IIR, iir & ~flip_mask); | 4516 | I915_WRITE(IIR, iir & ~flip_mask); |
4508 | new_iir = I915_READ(IIR); /* Flush posted writes */ | 4517 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
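Both legacy handlers now split hotplug processing into a cheap ack step, which reads and clears the latched status while the IIR port bit is known to be set, and a handler that runs only when that status was non-zero. A self-contained sketch of the pattern; the bit position, register model and function names are stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Toy ack/handle split: ack_hotplug() models the minimal read-and-clear of
 * a latched status register; handle_hotplug() models the slower processing
 * that only runs when something was actually latched. */
static uint32_t fake_hotplug_stat = 0x4;	/* pretend one port latched an event */

static uint32_t ack_hotplug(void)
{
	uint32_t status = fake_hotplug_stat;	/* read the latched status */

	fake_hotplug_stat = 0;			/* writing it back clears it */
	return status;
}

static void handle_hotplug(uint32_t status)
{
	printf("processing hotplug status 0x%x\n", status);
}

static void toy_irq_handler(uint32_t iir)
{
	if (iir & (1u << 17)) {			/* stand-in "display port event" bit */
		uint32_t status = ack_hotplug();

		if (status)			/* skip the heavy work if nothing was latched */
			handle_hotplug(status);
	}
}

int main(void)
{
	toy_irq_handler(1u << 17);
	return 0;
}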
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cea5a390d8c9..58ac6c7c690b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -79,6 +79,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
79 | 79 | ||
80 | /* PCI config space */ | 80 | /* PCI config space */ |
81 | 81 | ||
82 | #define MCHBAR_I915 0x44 | ||
83 | #define MCHBAR_I965 0x48 | ||
84 | #define MCHBAR_SIZE (4 * 4096) | ||
85 | |||
86 | #define DEVEN 0x54 | ||
87 | #define DEVEN_MCHBAR_EN (1 << 28) | ||
88 | |||
89 | #define BSM 0x5c | ||
90 | #define BSM_MASK (0xFFFF << 20) | ||
91 | |||
82 | #define HPLLCC 0xc0 /* 85x only */ | 92 | #define HPLLCC 0xc0 /* 85x only */ |
83 | #define GC_CLOCK_CONTROL_MASK (0x7 << 0) | 93 | #define GC_CLOCK_CONTROL_MASK (0x7 << 0) |
84 | #define GC_CLOCK_133_200 (0 << 0) | 94 | #define GC_CLOCK_133_200 (0 << 0) |
@@ -90,6 +100,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
90 | #define GC_CLOCK_166_266 (6 << 0) | 100 | #define GC_CLOCK_166_266 (6 << 0) |
91 | #define GC_CLOCK_166_250 (7 << 0) | 101 | #define GC_CLOCK_166_250 (7 << 0) |
92 | 102 | ||
103 | #define I915_GDRST 0xc0 /* PCI config register */ | ||
104 | #define GRDOM_FULL (0 << 2) | ||
105 | #define GRDOM_RENDER (1 << 2) | ||
106 | #define GRDOM_MEDIA (3 << 2) | ||
107 | #define GRDOM_MASK (3 << 2) | ||
108 | #define GRDOM_RESET_STATUS (1 << 1) | ||
109 | #define GRDOM_RESET_ENABLE (1 << 0) | ||
110 | |||
111 | #define GCDGMBUS 0xcc | ||
112 | |||
93 | #define GCFGC2 0xda | 113 | #define GCFGC2 0xda |
94 | #define GCFGC 0xf0 /* 915+ only */ | 114 | #define GCFGC 0xf0 /* 915+ only */ |
95 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) | 115 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) |
@@ -121,18 +141,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
121 | #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) | 141 | #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) |
122 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) | 142 | #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) |
123 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) | 143 | #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) |
124 | #define GCDGMBUS 0xcc | ||
125 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ | ||
126 | 144 | ||
145 | #define ASLE 0xe4 | ||
146 | #define ASLS 0xfc | ||
147 | |||
148 | #define SWSCI 0xe8 | ||
149 | #define SWSCI_SCISEL (1 << 15) | ||
150 | #define SWSCI_GSSCIE (1 << 0) | ||
151 | |||
152 | #define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ | ||
127 | 153 | ||
128 | /* Graphics reset regs */ | ||
129 | #define I915_GDRST 0xc0 /* PCI config register */ | ||
130 | #define GRDOM_FULL (0<<2) | ||
131 | #define GRDOM_RENDER (1<<2) | ||
132 | #define GRDOM_MEDIA (3<<2) | ||
133 | #define GRDOM_MASK (3<<2) | ||
134 | #define GRDOM_RESET_STATUS (1<<1) | ||
135 | #define GRDOM_RESET_ENABLE (1<<0) | ||
136 | 154 | ||
137 | #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4) | 155 | #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4) |
138 | #define ILK_GRDOM_FULL (0<<1) | 156 | #define ILK_GRDOM_FULL (0<<1) |
@@ -1375,14 +1393,10 @@ enum skl_disp_power_wells { | |||
1375 | 1393 | ||
1376 | #define _PORT_REF_DW6_A 0x162198 | 1394 | #define _PORT_REF_DW6_A 0x162198 |
1377 | #define _PORT_REF_DW6_BC 0x6C198 | 1395 | #define _PORT_REF_DW6_BC 0x6C198 |
1378 | /* | 1396 | #define GRC_CODE_SHIFT 24 |
1379 | * FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them | 1397 | #define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT) |
1380 | * after testing. | ||
1381 | */ | ||
1382 | #define GRC_CODE_SHIFT 23 | ||
1383 | #define GRC_CODE_MASK (0x1FF << GRC_CODE_SHIFT) | ||
1384 | #define GRC_CODE_FAST_SHIFT 16 | 1398 | #define GRC_CODE_FAST_SHIFT 16 |
1385 | #define GRC_CODE_FAST_MASK (0x7F << GRC_CODE_FAST_SHIFT) | 1399 | #define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT) |
1386 | #define GRC_CODE_SLOW_SHIFT 8 | 1400 | #define GRC_CODE_SLOW_SHIFT 8 |
1387 | #define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT) | 1401 | #define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT) |
1388 | #define GRC_CODE_NOM_MASK 0xFF | 1402 | #define GRC_CODE_NOM_MASK 0xFF |
@@ -2934,7 +2948,14 @@ enum skl_disp_power_wells { | |||
2934 | #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) | 2948 | #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) |
2935 | #define BXT_RP_STATE_CAP _MMIO(0x138170) | 2949 | #define BXT_RP_STATE_CAP _MMIO(0x138170) |
2936 | 2950 | ||
2937 | #define INTERVAL_1_28_US(us) (((us) * 100) >> 7) | 2951 | /* |
2952 | * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS | ||
2953 | * 8300) freezing up around GPU hangs. Looks as if even | ||
2954 | * scheduling/timer interrupts start misbehaving if the RPS | ||
2955 | * EI/thresholds are "bad", leading to a very sluggish or even | ||
2956 | * frozen machine. | ||
2957 | */ | ||
2958 | #define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) | ||
2938 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) | 2959 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) |
2939 | #define INTERVAL_0_833_US(us) (((us) * 6) / 5) | 2960 | #define INTERVAL_0_833_US(us) (((us) * 6) / 5) |
2940 | #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ | 2961 | #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ |
@@ -2943,6 +2964,15 @@ enum skl_disp_power_wells { | |||
2943 | INTERVAL_1_33_US(us)) : \ | 2964 | INTERVAL_1_33_US(us)) : \ |
2944 | INTERVAL_1_28_US(us)) | 2965 | INTERVAL_1_28_US(us)) |
2945 | 2966 | ||
2967 | #define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100) | ||
2968 | #define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3) | ||
2969 | #define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6) | ||
2970 | #define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \ | ||
2971 | (IS_BROXTON(dev_priv) ? \ | ||
2972 | INTERVAL_0_833_TO_US(interval) : \ | ||
2973 | INTERVAL_1_33_TO_US(interval)) : \ | ||
2974 | INTERVAL_1_28_TO_US(interval)) | ||
2975 | |||
2946 | /* | 2976 | /* |
2947 | * Logical Context regs | 2977 | * Logical Context regs |
2948 | */ | 2978 | */ |
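Because INTERVAL_1_28_US() now rounds up to a multiple of 25, and the new *_TO_US() macros are added for the reverse direction, the conversion is no longer an exact round trip. A standalone check of the behaviour using copies of the macros above; the 12500 us input is only an example value:

#include <stdio.h>

/* Userspace copies of the conversion macros from the hunk above, with
 * roundup() expanded, so the effect of forcing SNB-BDW intervals to
 * multiples of 25 can be checked outside the driver. */
#define ROUNDUP(x, y)            ((((x) + (y) - 1) / (y)) * (y))
#define INTERVAL_1_28_US(us)     ROUNDUP(((us) * 100) >> 7, 25)
#define INTERVAL_1_28_TO_US(itv) (((itv) << 7) / 100)

int main(void)
{
	unsigned int ei_us = 12500;			/* example RPS evaluation interval */
	unsigned int reg = INTERVAL_1_28_US(ei_us);	/* value that would be programmed */

	printf("%u us -> reg %u (%% 25 = %u) -> %u us\n",
	       ei_us, reg, reg % 25, INTERVAL_1_28_TO_US(reg));
	/* prints: 12500 us -> reg 9775 (% 25 = 0) -> 12512 us */
	return 0;
}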
@@ -6866,6 +6896,8 @@ enum skl_disp_power_wells { | |||
6866 | #define VLV_SPAREG2H _MMIO(0xA194) | 6896 | #define VLV_SPAREG2H _MMIO(0xA194) |
6867 | 6897 | ||
6868 | #define GTFIFODBG _MMIO(0x120000) | 6898 | #define GTFIFODBG _MMIO(0x120000) |
6899 | #define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) | ||
6900 | #define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) | ||
6869 | #define GT_FIFO_SBDROPERR (1<<6) | 6901 | #define GT_FIFO_SBDROPERR (1<<6) |
6870 | #define GT_FIFO_BLOBDROPERR (1<<5) | 6902 | #define GT_FIFO_BLOBDROPERR (1<<5) |
6871 | #define GT_FIFO_SB_READ_ABORTERR (1<<4) | 6903 | #define GT_FIFO_SB_READ_ABORTERR (1<<4) |
@@ -6882,8 +6914,11 @@ enum skl_disp_power_wells { | |||
6882 | 6914 | ||
6883 | #define HSW_IDICR _MMIO(0x9008) | 6915 | #define HSW_IDICR _MMIO(0x9008) |
6884 | #define IDIHASHMSK(x) (((x) & 0x3f) << 16) | 6916 | #define IDIHASHMSK(x) (((x) & 0x3f) << 16) |
6885 | #define HSW_EDRAM_PRESENT _MMIO(0x120010) | 6917 | #define HSW_EDRAM_CAP _MMIO(0x120010) |
6886 | #define EDRAM_ENABLED 0x1 | 6918 | #define EDRAM_ENABLED 0x1 |
6919 | #define EDRAM_NUM_BANKS(cap) (((cap) >> 1) & 0xf) | ||
6920 | #define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7) | ||
6921 | #define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3) | ||
6887 | 6922 | ||
6888 | #define GEN6_UCGCTL1 _MMIO(0x9400) | 6923 | #define GEN6_UCGCTL1 _MMIO(0x9400) |
6889 | # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) | 6924 | # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) |
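HSW_EDRAM_PRESENT is renamed to HSW_EDRAM_CAP and gains bank/way/set fields, letting gen9+ derive the real eDRAM size rather than assuming a fixed amount. A throwaway decoder for the new fields; the sample capability value is invented, and turning the way/set indices into byte sizes is left to the driver:

#include <stdint.h>
#include <stdio.h>

/* Field extraction copied from the HSW_EDRAM_CAP definitions above; the
 * sample register value is made up to exercise the macros and does not
 * come from real hardware. */
#define EDRAM_ENABLED		0x1
#define EDRAM_NUM_BANKS(cap)	(((cap) >> 1) & 0xf)
#define EDRAM_WAYS_IDX(cap)	(((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap)	(((cap) >> 8) & 0x3)

int main(void)
{
	uint32_t cap = 0x31;	/* hypothetical readout: enabled, 8 banks, ways idx 1, sets idx 0 */

	printf("enabled=%u banks=%u ways_idx=%u sets_idx=%u\n",
	       cap & EDRAM_ENABLED, EDRAM_NUM_BANKS(cap),
	       EDRAM_WAYS_IDX(cap), EDRAM_SETS_IDX(cap));
	return 0;
}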
@@ -7161,6 +7196,7 @@ enum skl_disp_power_wells { | |||
7161 | 7196 | ||
7162 | #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) | 7197 | #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) |
7163 | #define GEN9_ENABLE_YV12_BUGFIX (1<<4) | 7198 | #define GEN9_ENABLE_YV12_BUGFIX (1<<4) |
7199 | #define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2) | ||
7164 | 7200 | ||
7165 | /* Audio */ | 7201 | /* Audio */ |
7166 | #define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020) | 7202 | #define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020) |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index eb756c41d9e1..e72dd9a8d6bf 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -58,8 +58,6 @@ | |||
58 | #define SLAVE_ADDR1 0x70 | 58 | #define SLAVE_ADDR1 0x70 |
59 | #define SLAVE_ADDR2 0x72 | 59 | #define SLAVE_ADDR2 0x72 |
60 | 60 | ||
61 | static int panel_type; | ||
62 | |||
63 | /* Get BDB block size given a pointer to Block ID. */ | 61 | /* Get BDB block size given a pointer to Block ID. */ |
64 | static u32 _get_blocksize(const u8 *block_base) | 62 | static u32 _get_blocksize(const u8 *block_base) |
65 | { | 63 | { |
@@ -205,17 +203,32 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
205 | const struct lvds_dvo_timing *panel_dvo_timing; | 203 | const struct lvds_dvo_timing *panel_dvo_timing; |
206 | const struct lvds_fp_timing *fp_timing; | 204 | const struct lvds_fp_timing *fp_timing; |
207 | struct drm_display_mode *panel_fixed_mode; | 205 | struct drm_display_mode *panel_fixed_mode; |
206 | int panel_type; | ||
208 | int drrs_mode; | 207 | int drrs_mode; |
208 | int ret; | ||
209 | 209 | ||
210 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); | 210 | lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); |
211 | if (!lvds_options) | 211 | if (!lvds_options) |
212 | return; | 212 | return; |
213 | 213 | ||
214 | dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; | 214 | dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; |
215 | if (lvds_options->panel_type == 0xff) | ||
216 | return; | ||
217 | 215 | ||
218 | panel_type = lvds_options->panel_type; | 216 | ret = intel_opregion_get_panel_type(dev_priv->dev); |
217 | if (ret >= 0) { | ||
218 | WARN_ON(ret > 0xf); | ||
219 | panel_type = ret; | ||
220 | DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type); | ||
221 | } else { | ||
222 | if (lvds_options->panel_type > 0xf) { | ||
223 | DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n", | ||
224 | lvds_options->panel_type); | ||
225 | return; | ||
226 | } | ||
227 | panel_type = lvds_options->panel_type; | ||
228 | DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type); | ||
229 | } | ||
230 | |||
231 | dev_priv->vbt.panel_type = panel_type; | ||
219 | 232 | ||
220 | drrs_mode = (lvds_options->dps_panel_type_bits | 233 | drrs_mode = (lvds_options->dps_panel_type_bits |
221 | >> (panel_type * 2)) & MODE_MASK; | 234 | >> (panel_type * 2)) & MODE_MASK; |
@@ -251,7 +264,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
251 | 264 | ||
252 | panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, | 265 | panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, |
253 | lvds_lfp_data_ptrs, | 266 | lvds_lfp_data_ptrs, |
254 | lvds_options->panel_type); | 267 | panel_type); |
255 | 268 | ||
256 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); | 269 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); |
257 | if (!panel_fixed_mode) | 270 | if (!panel_fixed_mode) |
@@ -266,7 +279,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
266 | 279 | ||
267 | fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, | 280 | fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, |
268 | lvds_lfp_data_ptrs, | 281 | lvds_lfp_data_ptrs, |
269 | lvds_options->panel_type); | 282 | panel_type); |
270 | if (fp_timing) { | 283 | if (fp_timing) { |
271 | /* check the resolution, just to be sure */ | 284 | /* check the resolution, just to be sure */ |
272 | if (fp_timing->x_res == panel_fixed_mode->hdisplay && | 285 | if (fp_timing->x_res == panel_fixed_mode->hdisplay && |
@@ -284,6 +297,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, | |||
284 | { | 297 | { |
285 | const struct bdb_lfp_backlight_data *backlight_data; | 298 | const struct bdb_lfp_backlight_data *backlight_data; |
286 | const struct bdb_lfp_backlight_data_entry *entry; | 299 | const struct bdb_lfp_backlight_data_entry *entry; |
300 | int panel_type = dev_priv->vbt.panel_type; | ||
287 | 301 | ||
288 | backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); | 302 | backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); |
289 | if (!backlight_data) | 303 | if (!backlight_data) |
@@ -546,6 +560,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) | |||
546 | const struct bdb_edp *edp; | 560 | const struct bdb_edp *edp; |
547 | const struct edp_power_seq *edp_pps; | 561 | const struct edp_power_seq *edp_pps; |
548 | const struct edp_link_params *edp_link_params; | 562 | const struct edp_link_params *edp_link_params; |
563 | int panel_type = dev_priv->vbt.panel_type; | ||
549 | 564 | ||
550 | edp = find_section(bdb, BDB_EDP); | 565 | edp = find_section(bdb, BDB_EDP); |
551 | if (!edp) { | 566 | if (!edp) { |
@@ -657,6 +672,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) | |||
657 | { | 672 | { |
658 | const struct bdb_psr *psr; | 673 | const struct bdb_psr *psr; |
659 | const struct psr_table *psr_table; | 674 | const struct psr_table *psr_table; |
675 | int panel_type = dev_priv->vbt.panel_type; | ||
660 | 676 | ||
661 | psr = find_section(bdb, BDB_PSR); | 677 | psr = find_section(bdb, BDB_PSR); |
662 | if (!psr) { | 678 | if (!psr) { |
@@ -703,6 +719,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv, | |||
703 | const struct bdb_mipi_config *start; | 719 | const struct bdb_mipi_config *start; |
704 | const struct mipi_config *config; | 720 | const struct mipi_config *config; |
705 | const struct mipi_pps_data *pps; | 721 | const struct mipi_pps_data *pps; |
722 | int panel_type = dev_priv->vbt.panel_type; | ||
706 | 723 | ||
707 | /* parse MIPI blocks only if LFP type is MIPI */ | 724 | /* parse MIPI blocks only if LFP type is MIPI */ |
708 | if (!intel_bios_is_dsi_present(dev_priv, NULL)) | 725 | if (!intel_bios_is_dsi_present(dev_priv, NULL)) |
@@ -910,6 +927,7 @@ static void | |||
910 | parse_mipi_sequence(struct drm_i915_private *dev_priv, | 927 | parse_mipi_sequence(struct drm_i915_private *dev_priv, |
911 | const struct bdb_header *bdb) | 928 | const struct bdb_header *bdb) |
912 | { | 929 | { |
930 | int panel_type = dev_priv->vbt.panel_type; | ||
913 | const struct bdb_mipi_sequence *sequence; | 931 | const struct bdb_mipi_sequence *sequence; |
914 | const u8 *seq_data; | 932 | const u8 *seq_data; |
915 | u32 seq_size; | 933 | u32 seq_size; |
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 3f57cb94d9ad..a34c23eceba0 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
@@ -50,6 +50,7 @@ MODULE_FIRMWARE(I915_CSR_SKL); | |||
50 | MODULE_FIRMWARE(I915_CSR_BXT); | 50 | MODULE_FIRMWARE(I915_CSR_BXT); |
51 | 51 | ||
52 | #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) | 52 | #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) |
53 | #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) | ||
53 | 54 | ||
54 | #define CSR_MAX_FW_SIZE 0x2FFF | 55 | #define CSR_MAX_FW_SIZE 0x2FFF |
55 | #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF | 56 | #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF |
@@ -281,6 +282,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, | |||
281 | uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; | 282 | uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; |
282 | uint32_t i; | 283 | uint32_t i; |
283 | uint32_t *dmc_payload; | 284 | uint32_t *dmc_payload; |
285 | uint32_t required_min_version; | ||
284 | 286 | ||
285 | if (!fw) | 287 | if (!fw) |
286 | return NULL; | 288 | return NULL; |
@@ -296,15 +298,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, | |||
296 | 298 | ||
297 | csr->version = css_header->version; | 299 | csr->version = css_header->version; |
298 | 300 | ||
299 | if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && | 301 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
300 | csr->version < SKL_CSR_VERSION_REQUIRED) { | 302 | required_min_version = SKL_CSR_VERSION_REQUIRED; |
301 | DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u," | 303 | } else if (IS_BROXTON(dev_priv)) { |
304 | required_min_version = BXT_CSR_VERSION_REQUIRED; | ||
305 | } else { | ||
306 | MISSING_CASE(INTEL_REVID(dev_priv)); | ||
307 | required_min_version = 0; | ||
308 | } | ||
309 | |||
310 | if (csr->version < required_min_version) { | ||
311 | DRM_INFO("Refusing to load old DMC firmware v%u.%u," | ||
302 | " please upgrade to v%u.%u or later" | 312 | " please upgrade to v%u.%u or later" |
303 | " [" FIRMWARE_URL "].\n", | 313 | " [" FIRMWARE_URL "].\n", |
304 | CSR_VERSION_MAJOR(csr->version), | 314 | CSR_VERSION_MAJOR(csr->version), |
305 | CSR_VERSION_MINOR(csr->version), | 315 | CSR_VERSION_MINOR(csr->version), |
306 | CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED), | 316 | CSR_VERSION_MAJOR(required_min_version), |
307 | CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED)); | 317 | CSR_VERSION_MINOR(required_min_version)); |
308 | return NULL; | 318 | return NULL; |
309 | } | 319 | } |
310 | 320 | ||
@@ -457,10 +467,50 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) | |||
457 | } | 467 | } |
458 | 468 | ||
459 | /** | 469 | /** |
470 | * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend | ||
471 | * @dev_priv: i915 drm device | ||
472 | * | ||
473 | * Prepare the DMC firmware before entering system suspend. This includes | ||
474 | * flushing pending work items and releasing any resources acquired during | ||
475 | * init. | ||
476 | */ | ||
477 | void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv) | ||
478 | { | ||
479 | if (!HAS_CSR(dev_priv)) | ||
480 | return; | ||
481 | |||
482 | flush_work(&dev_priv->csr.work); | ||
483 | |||
484 | /* Drop the reference held in case DMC isn't loaded. */ | ||
485 | if (!dev_priv->csr.dmc_payload) | ||
486 | intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); | ||
487 | } | ||
488 | |||
489 | /** | ||
490 | * intel_csr_ucode_resume() - init CSR firmware during system resume | ||
491 | * @dev_priv: i915 drm device | ||
492 | * | ||
493 | * Reinitialize the DMC firmware during system resume, reacquiring any | ||
494 | * resources released in intel_csr_ucode_suspend(). | ||
495 | */ | ||
496 | void intel_csr_ucode_resume(struct drm_i915_private *dev_priv) | ||
497 | { | ||
498 | if (!HAS_CSR(dev_priv)) | ||
499 | return; | ||
500 | |||
501 | /* | ||
502 | * Reacquire the reference to keep RPM disabled in case DMC isn't | ||
503 | * loaded. | ||
504 | */ | ||
505 | if (!dev_priv->csr.dmc_payload) | ||
506 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); | ||
507 | } | ||
508 | |||
509 | /** | ||
460 | * intel_csr_ucode_fini() - unload the CSR firmware. | 510 | * intel_csr_ucode_fini() - unload the CSR firmware. |
461 | * @dev_priv: i915 drm device. | 511 | * @dev_priv: i915 drm device. |
462 | * | 512 | * |
463 | * Firmmware unloading includes freeing the internal momory and reset the | 513 | * Firmmware unloading includes freeing the internal memory and reset the |
464 | * firmware loading status. | 514 | * firmware loading status. |
465 | */ | 515 | */ |
466 | void intel_csr_ucode_fini(struct drm_i915_private *dev_priv) | 516 | void intel_csr_ucode_fini(struct drm_i915_private *dev_priv) |
@@ -468,7 +518,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv) | |||
468 | if (!HAS_CSR(dev_priv)) | 518 | if (!HAS_CSR(dev_priv)) |
469 | return; | 519 | return; |
470 | 520 | ||
471 | flush_work(&dev_priv->csr.work); | 521 | intel_csr_ucode_suspend(dev_priv); |
472 | 522 | ||
473 | kfree(dev_priv->csr.dmc_payload); | 523 | kfree(dev_priv->csr.dmc_payload); |
474 | } | 524 | } |
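The new intel_csr_ucode_suspend()/intel_csr_ucode_resume() pair keeps the init-time power reference balanced when no DMC payload is loaded: the reference is dropped across system suspend and re-taken on resume so runtime PM stays blocked until firmware arrives. A toy model of that balancing, with stand-in names rather than the driver's types:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the reference balancing above: while no DMC payload is
 * loaded, one power reference taken at init keeps runtime PM disabled.
 * It has to be dropped for system suspend and re-taken on resume. */
struct toy_csr {
	bool dmc_loaded;
	int init_power_refs;
};

static void power_get(struct toy_csr *c) { c->init_power_refs++; }
static void power_put(struct toy_csr *c) { c->init_power_refs--; }

static void csr_suspend(struct toy_csr *c)
{
	if (!c->dmc_loaded)
		power_put(c);	/* drop the reference held since init */
}

static void csr_resume(struct toy_csr *c)
{
	if (!c->dmc_loaded)
		power_get(c);	/* reacquire it so runtime PM stays blocked */
}

int main(void)
{
	struct toy_csr c = { .dmc_loaded = false, .init_power_refs = 1 };

	csr_suspend(&c);
	printf("refs across suspend: %d\n", c.init_power_refs);	/* 0 */
	csr_resume(&c);
	printf("refs after resume:   %d\n", c.init_power_refs);	/* 1 */
	return 0;
}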
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 921edf183d22..e30e1781fd71 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
443 | } else if (IS_BROADWELL(dev_priv)) { | 443 | } else if (IS_BROADWELL(dev_priv)) { |
444 | ddi_translations_fdi = bdw_ddi_translations_fdi; | 444 | ddi_translations_fdi = bdw_ddi_translations_fdi; |
445 | ddi_translations_dp = bdw_ddi_translations_dp; | 445 | ddi_translations_dp = bdw_ddi_translations_dp; |
446 | ddi_translations_edp = bdw_ddi_translations_edp; | 446 | |
447 | if (dev_priv->vbt.edp.low_vswing) { | ||
448 | ddi_translations_edp = bdw_ddi_translations_edp; | ||
449 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); | ||
450 | } else { | ||
451 | ddi_translations_edp = bdw_ddi_translations_dp; | ||
452 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | ||
453 | } | ||
454 | |||
447 | ddi_translations_hdmi = bdw_ddi_translations_hdmi; | 455 | ddi_translations_hdmi = bdw_ddi_translations_hdmi; |
448 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); | 456 | |
449 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | 457 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); |
450 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | 458 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); |
451 | hdmi_default_entry = 7; | 459 | hdmi_default_entry = 7; |
@@ -1722,12 +1730,78 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) | |||
1722 | } | 1730 | } |
1723 | } | 1731 | } |
1724 | 1732 | ||
1733 | static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, | ||
1734 | enum dpio_phy phy) | ||
1735 | { | ||
1736 | if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) | ||
1737 | return false; | ||
1738 | |||
1739 | if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) & | ||
1740 | (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) { | ||
1741 | DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n", | ||
1742 | phy); | ||
1743 | |||
1744 | return false; | ||
1745 | } | ||
1746 | |||
1747 | if (phy == DPIO_PHY1 && | ||
1748 | !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) { | ||
1749 | DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n"); | ||
1750 | |||
1751 | return false; | ||
1752 | } | ||
1753 | |||
1754 | if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { | ||
1755 | DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n", | ||
1756 | phy); | ||
1757 | |||
1758 | return false; | ||
1759 | } | ||
1760 | |||
1761 | return true; | ||
1762 | } | ||
1763 | |||
1764 | static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) | ||
1765 | { | ||
1766 | u32 val = I915_READ(BXT_PORT_REF_DW6(phy)); | ||
1767 | |||
1768 | return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; | ||
1769 | } | ||
1770 | |||
1771 | static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv, | ||
1772 | enum dpio_phy phy) | ||
1773 | { | ||
1774 | if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) | ||
1775 | DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); | ||
1776 | } | ||
1777 | |||
1778 | static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, | ||
1779 | enum dpio_phy phy); | ||
1780 | |||
1725 | static void broxton_phy_init(struct drm_i915_private *dev_priv, | 1781 | static void broxton_phy_init(struct drm_i915_private *dev_priv, |
1726 | enum dpio_phy phy) | 1782 | enum dpio_phy phy) |
1727 | { | 1783 | { |
1728 | enum port port; | 1784 | enum port port; |
1729 | u32 ports, val; | 1785 | u32 ports, val; |
1730 | 1786 | ||
1787 | if (broxton_phy_is_enabled(dev_priv, phy)) { | ||
1788 | /* Still read out the GRC value for state verification */ | ||
1789 | if (phy == DPIO_PHY0) | ||
1790 | dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy); | ||
1791 | |||
1792 | if (broxton_phy_verify_state(dev_priv, phy)) { | ||
1793 | DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " | ||
1794 | "won't reprogram it\n", phy); | ||
1795 | |||
1796 | return; | ||
1797 | } | ||
1798 | |||
1799 | DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, " | ||
1800 | "force reprogramming it\n", phy); | ||
1801 | } else { | ||
1802 | DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy); | ||
1803 | } | ||
1804 | |||
1731 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); | 1805 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); |
1732 | val |= GT_DISPLAY_POWER_ON(phy); | 1806 | val |= GT_DISPLAY_POWER_ON(phy); |
1733 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); | 1807 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); |
@@ -1798,6 +1872,9 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, | |||
1798 | * enabled. | 1872 | * enabled. |
1799 | * TODO: port C is only connected on BXT-P, so on BXT0/1 we should | 1873 | * TODO: port C is only connected on BXT-P, so on BXT0/1 we should |
1800 | * power down the second channel on PHY0 as well. | 1874 | * power down the second channel on PHY0 as well. |
1875 | * | ||
1876 | * FIXME: Clarify programming of the following, the register is | ||
1877 | * read-only with bit 6 fixed at 0 at least in stepping A. | ||
1801 | */ | 1878 | */ |
1802 | if (phy == DPIO_PHY1) | 1879 | if (phy == DPIO_PHY1) |
1803 | val |= OCL2_LDOFUSE_PWR_DIS; | 1880 | val |= OCL2_LDOFUSE_PWR_DIS; |
@@ -1810,12 +1887,10 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, | |||
1810 | * the corresponding calibrated value from PHY1, and disable | 1887 | * the corresponding calibrated value from PHY1, and disable |
1811 | * the automatic calibration on PHY0. | 1888 | * the automatic calibration on PHY0. |
1812 | */ | 1889 | */ |
1813 | if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE, | 1890 | broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); |
1814 | 10)) | ||
1815 | DRM_ERROR("timeout waiting for PHY1 GRC\n"); | ||
1816 | 1891 | ||
1817 | val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1)); | 1892 | val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, |
1818 | val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; | 1893 | DPIO_PHY1); |
1819 | grc_code = val << GRC_CODE_FAST_SHIFT | | 1894 | grc_code = val << GRC_CODE_FAST_SHIFT | |
1820 | val << GRC_CODE_SLOW_SHIFT | | 1895 | val << GRC_CODE_SLOW_SHIFT | |
1821 | val; | 1896 | val; |
@@ -1825,17 +1900,27 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, | |||
1825 | val |= GRC_DIS | GRC_RDY_OVRD; | 1900 | val |= GRC_DIS | GRC_RDY_OVRD; |
1826 | I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val); | 1901 | I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val); |
1827 | } | 1902 | } |
1903 | /* | ||
1904 | * During PHY1 init delay waiting for GRC calibration to finish, since | ||
1905 | * it can happen in parallel with the subsequent PHY0 init. | ||
1906 | */ | ||
1828 | 1907 | ||
1829 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); | 1908 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); |
1830 | val |= COMMON_RESET_DIS; | 1909 | val |= COMMON_RESET_DIS; |
1831 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); | 1910 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); |
1832 | } | 1911 | } |
1833 | 1912 | ||
1834 | void broxton_ddi_phy_init(struct drm_device *dev) | 1913 | void broxton_ddi_phy_init(struct drm_i915_private *dev_priv) |
1835 | { | 1914 | { |
1836 | /* Enable PHY1 first since it provides Rcomp for PHY0 */ | 1915 | /* Enable PHY1 first since it provides Rcomp for PHY0 */ |
1837 | broxton_phy_init(dev->dev_private, DPIO_PHY1); | 1916 | broxton_phy_init(dev_priv, DPIO_PHY1); |
1838 | broxton_phy_init(dev->dev_private, DPIO_PHY0); | 1917 | broxton_phy_init(dev_priv, DPIO_PHY0); |
1918 | |||
1919 | /* | ||
1920 | * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the | ||
1921 | * PHY1 GRC calibration to finish, so wait for it here. | ||
1922 | */ | ||
1923 | broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); | ||
1839 | } | 1924 | } |
1840 | 1925 | ||
1841 | static void broxton_phy_uninit(struct drm_i915_private *dev_priv, | 1926 | static void broxton_phy_uninit(struct drm_i915_private *dev_priv, |
@@ -1846,17 +1931,126 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv, | |||
1846 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); | 1931 | val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); |
1847 | val &= ~COMMON_RESET_DIS; | 1932 | val &= ~COMMON_RESET_DIS; |
1848 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); | 1933 | I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); |
1934 | |||
1935 | val = I915_READ(BXT_P_CR_GT_DISP_PWRON); | ||
1936 | val &= ~GT_DISPLAY_POWER_ON(phy); | ||
1937 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); | ||
1849 | } | 1938 | } |
1850 | 1939 | ||
1851 | void broxton_ddi_phy_uninit(struct drm_device *dev) | 1940 | void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv) |
1852 | { | 1941 | { |
1853 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1854 | |||
1855 | broxton_phy_uninit(dev_priv, DPIO_PHY1); | 1942 | broxton_phy_uninit(dev_priv, DPIO_PHY1); |
1856 | broxton_phy_uninit(dev_priv, DPIO_PHY0); | 1943 | broxton_phy_uninit(dev_priv, DPIO_PHY0); |
1944 | } | ||
1945 | |||
1946 | static bool __printf(6, 7) | ||
1947 | __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, | ||
1948 | i915_reg_t reg, u32 mask, u32 expected, | ||
1949 | const char *reg_fmt, ...) | ||
1950 | { | ||
1951 | struct va_format vaf; | ||
1952 | va_list args; | ||
1953 | u32 val; | ||
1954 | |||
1955 | val = I915_READ(reg); | ||
1956 | if ((val & mask) == expected) | ||
1957 | return true; | ||
1958 | |||
1959 | va_start(args, reg_fmt); | ||
1960 | vaf.fmt = reg_fmt; | ||
1961 | vaf.va = &args; | ||
1857 | 1962 | ||
1858 | /* FIXME: do this in broxton_phy_uninit per phy */ | 1963 | DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: " |
1859 | I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0); | 1964 | "current %08x, expected %08x (mask %08x)\n", |
1965 | phy, &vaf, reg.reg, val, (val & ~mask) | expected, | ||
1966 | mask); | ||
1967 | |||
1968 | va_end(args); | ||
1969 | |||
1970 | return false; | ||
1971 | } | ||
1972 | |||
1973 | static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, | ||
1974 | enum dpio_phy phy) | ||
1975 | { | ||
1976 | enum port port; | ||
1977 | u32 ports; | ||
1978 | uint32_t mask; | ||
1979 | bool ok; | ||
1980 | |||
1981 | #define _CHK(reg, mask, exp, fmt, ...) \ | ||
1982 | __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ | ||
1983 | ## __VA_ARGS__) | ||
1984 | |||
1985 | /* We expect the PHY to be always enabled */ | ||
1986 | if (!broxton_phy_is_enabled(dev_priv, phy)) | ||
1987 | return false; | ||
1988 | |||
1989 | ok = true; | ||
1990 | |||
1991 | if (phy == DPIO_PHY0) | ||
1992 | ports = BIT(PORT_B) | BIT(PORT_C); | ||
1993 | else | ||
1994 | ports = BIT(PORT_A); | ||
1995 | |||
1996 | for_each_port_masked(port, ports) { | ||
1997 | int lane; | ||
1998 | |||
1999 | for (lane = 0; lane < 4; lane++) | ||
2000 | ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane), | ||
2001 | LATENCY_OPTIM, | ||
2002 | lane != 1 ? LATENCY_OPTIM : 0, | ||
2003 | "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane); | ||
2004 | } | ||
2005 | |||
2006 | /* PLL Rcomp code offset */ | ||
2007 | ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), | ||
2008 | IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, | ||
2009 | "BXT_PORT_CL1CM_DW9(%d)", phy); | ||
2010 | ok &= _CHK(BXT_PORT_CL1CM_DW10(phy), | ||
2011 | IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT, | ||
2012 | "BXT_PORT_CL1CM_DW10(%d)", phy); | ||
2013 | |||
2014 | /* Power gating */ | ||
2015 | mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG; | ||
2016 | ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask, | ||
2017 | "BXT_PORT_CL1CM_DW28(%d)", phy); | ||
2018 | |||
2019 | if (phy == DPIO_PHY0) | ||
2020 | ok &= _CHK(BXT_PORT_CL2CM_DW6_BC, | ||
2021 | DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN, | ||
2022 | "BXT_PORT_CL2CM_DW6_BC"); | ||
2023 | |||
2024 | /* | ||
2025 | * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS, | ||
2026 | * at least on stepping A this bit is read-only and fixed at 0. | ||
2027 | */ | ||
2028 | |||
2029 | if (phy == DPIO_PHY0) { | ||
2030 | u32 grc_code = dev_priv->bxt_phy_grc; | ||
2031 | |||
2032 | grc_code = grc_code << GRC_CODE_FAST_SHIFT | | ||
2033 | grc_code << GRC_CODE_SLOW_SHIFT | | ||
2034 | grc_code; | ||
2035 | mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK | | ||
2036 | GRC_CODE_NOM_MASK; | ||
2037 | ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code, | ||
2038 | "BXT_PORT_REF_DW6(%d)", DPIO_PHY0); | ||
2039 | |||
2040 | mask = GRC_DIS | GRC_RDY_OVRD; | ||
2041 | ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask, | ||
2042 | "BXT_PORT_REF_DW8(%d)", DPIO_PHY0); | ||
2043 | } | ||
2044 | |||
2045 | return ok; | ||
2046 | #undef _CHK | ||
2047 | } | ||
2048 | |||
2049 | void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv) | ||
2050 | { | ||
2051 | if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) || | ||
2052 | !broxton_phy_verify_state(dev_priv, DPIO_PHY1)) | ||
2053 | i915_report_error(dev_priv, "DDI PHY state mismatch\n"); | ||
1860 | } | 2054 | } |
1861 | 2055 | ||
1862 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) | 2056 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) |
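__phy_reg_verify_state() is a printf-checked variadic helper that compares the masked register value with the expected bits and logs the formatted register name on mismatch, and broxton_phy_verify_state() ANDs every result into one flag so all mismatches get reported rather than just the first. A userspace approximation of the same shape; vfprintf stands in for the kernel's %pV logging and the register values are fabricated:

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One variadic checker compares (value & mask) against the expected bits and
 * prints a formatted register name on mismatch; callers accumulate every
 * result with &= so a single failure does not hide the rest. */
static bool __attribute__((format(printf, 4, 5)))
check_reg(uint32_t val, uint32_t mask, uint32_t expected, const char *fmt, ...)
{
	va_list args;

	if ((val & mask) == expected)
		return true;

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fprintf(stderr, ": got %08x, want %08x (mask %08x)\n",
		val & mask, expected, mask);
	return false;
}

int main(void)
{
	bool ok = true;

	ok &= check_reg(0x3, 0x3, 0x3, "PORT_CL1CM_DW28(%d)", 0);	/* passes */
	ok &= check_reg(0x1, 0x3, 0x3, "PORT_REF_DW8(%d)", 0);		/* fails and logs */
	return ok ? 0 : 1;
}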
@@ -2044,12 +2238,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
2044 | intel_ddi_clock_get(encoder, pipe_config); | 2238 | intel_ddi_clock_get(encoder, pipe_config); |
2045 | } | 2239 | } |
2046 | 2240 | ||
2047 | static void intel_ddi_destroy(struct drm_encoder *encoder) | ||
2048 | { | ||
2049 | /* HDMI has nothing special to destroy, so we can go with this. */ | ||
2050 | intel_dp_encoder_destroy(encoder); | ||
2051 | } | ||
2052 | |||
2053 | static bool intel_ddi_compute_config(struct intel_encoder *encoder, | 2241 | static bool intel_ddi_compute_config(struct intel_encoder *encoder, |
2054 | struct intel_crtc_state *pipe_config) | 2242 | struct intel_crtc_state *pipe_config) |
2055 | { | 2243 | { |
@@ -2068,7 +2256,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, | |||
2068 | } | 2256 | } |
2069 | 2257 | ||
2070 | static const struct drm_encoder_funcs intel_ddi_funcs = { | 2258 | static const struct drm_encoder_funcs intel_ddi_funcs = { |
2071 | .destroy = intel_ddi_destroy, | 2259 | .reset = intel_dp_encoder_reset, |
2260 | .destroy = intel_dp_encoder_destroy, | ||
2072 | }; | 2261 | }; |
2073 | 2262 | ||
2074 | static struct intel_connector * | 2263 | static struct intel_connector * |
@@ -2167,6 +2356,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
2167 | intel_encoder->post_disable = intel_ddi_post_disable; | 2356 | intel_encoder->post_disable = intel_ddi_post_disable; |
2168 | intel_encoder->get_hw_state = intel_ddi_get_hw_state; | 2357 | intel_encoder->get_hw_state = intel_ddi_get_hw_state; |
2169 | intel_encoder->get_config = intel_ddi_get_config; | 2358 | intel_encoder->get_config = intel_ddi_get_config; |
2359 | intel_encoder->suspend = intel_dp_encoder_suspend; | ||
2170 | 2360 | ||
2171 | intel_dig_port->port = port; | 2361 | intel_dig_port->port = port; |
2172 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & | 2362 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 551541b3038c..ff60241b1f76 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1530,45 +1530,47 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1530 | assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); | 1530 | assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); |
1531 | } | 1531 | } |
1532 | 1532 | ||
1533 | static void _vlv_enable_pll(struct intel_crtc *crtc, | ||
1534 | const struct intel_crtc_state *pipe_config) | ||
1535 | { | ||
1536 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
1537 | enum pipe pipe = crtc->pipe; | ||
1538 | |||
1539 | I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); | ||
1540 | POSTING_READ(DPLL(pipe)); | ||
1541 | udelay(150); | ||
1542 | |||
1543 | if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) | ||
1544 | DRM_ERROR("DPLL %d failed to lock\n", pipe); | ||
1545 | } | ||
1546 | |||
1533 | static void vlv_enable_pll(struct intel_crtc *crtc, | 1547 | static void vlv_enable_pll(struct intel_crtc *crtc, |
1534 | const struct intel_crtc_state *pipe_config) | 1548 | const struct intel_crtc_state *pipe_config) |
1535 | { | 1549 | { |
1536 | struct drm_device *dev = crtc->base.dev; | 1550 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1537 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1538 | enum pipe pipe = crtc->pipe; | 1551 | enum pipe pipe = crtc->pipe; |
1539 | i915_reg_t reg = DPLL(pipe); | ||
1540 | u32 dpll = pipe_config->dpll_hw_state.dpll; | ||
1541 | 1552 | ||
1542 | assert_pipe_disabled(dev_priv, pipe); | 1553 | assert_pipe_disabled(dev_priv, pipe); |
1543 | 1554 | ||
1544 | /* PLL is protected by panel, make sure we can write it */ | 1555 | /* PLL is protected by panel, make sure we can write it */ |
1545 | assert_panel_unlocked(dev_priv, pipe); | 1556 | assert_panel_unlocked(dev_priv, pipe); |
1546 | 1557 | ||
1547 | I915_WRITE(reg, dpll); | 1558 | if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) |
1548 | POSTING_READ(reg); | 1559 | _vlv_enable_pll(crtc, pipe_config); |
1549 | udelay(150); | ||
1550 | |||
1551 | if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) | ||
1552 | DRM_ERROR("DPLL %d failed to lock\n", pipe); | ||
1553 | 1560 | ||
1554 | I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); | 1561 | I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); |
1555 | POSTING_READ(DPLL_MD(pipe)); | 1562 | POSTING_READ(DPLL_MD(pipe)); |
1556 | } | 1563 | } |
1557 | 1564 | ||
1558 | static void chv_enable_pll(struct intel_crtc *crtc, | 1565 | |
1559 | const struct intel_crtc_state *pipe_config) | 1566 | static void _chv_enable_pll(struct intel_crtc *crtc, |
1567 | const struct intel_crtc_state *pipe_config) | ||
1560 | { | 1568 | { |
1561 | struct drm_device *dev = crtc->base.dev; | 1569 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1562 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1563 | enum pipe pipe = crtc->pipe; | 1570 | enum pipe pipe = crtc->pipe; |
1564 | enum dpio_channel port = vlv_pipe_to_channel(pipe); | 1571 | enum dpio_channel port = vlv_pipe_to_channel(pipe); |
1565 | u32 tmp; | 1572 | u32 tmp; |
1566 | 1573 | ||
1567 | assert_pipe_disabled(dev_priv, pipe); | ||
1568 | |||
1569 | /* PLL is protected by panel, make sure we can write it */ | ||
1570 | assert_panel_unlocked(dev_priv, pipe); | ||
1571 | |||
1572 | mutex_lock(&dev_priv->sb_lock); | 1574 | mutex_lock(&dev_priv->sb_lock); |
1573 | 1575 | ||
1574 | /* Enable back the 10bit clock to display controller */ | 1576 | /* Enable back the 10bit clock to display controller */ |
@@ -1589,6 +1591,21 @@ static void chv_enable_pll(struct intel_crtc *crtc, | |||
1589 | /* Check PLL is locked */ | 1591 | /* Check PLL is locked */ |
1590 | if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) | 1592 | if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) |
1591 | DRM_ERROR("PLL %d failed to lock\n", pipe); | 1593 | DRM_ERROR("PLL %d failed to lock\n", pipe); |
1594 | } | ||
1595 | |||
1596 | static void chv_enable_pll(struct intel_crtc *crtc, | ||
1597 | const struct intel_crtc_state *pipe_config) | ||
1598 | { | ||
1599 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
1600 | enum pipe pipe = crtc->pipe; | ||
1601 | |||
1602 | assert_pipe_disabled(dev_priv, pipe); | ||
1603 | |||
1604 | /* PLL is protected by panel, make sure we can write it */ | ||
1605 | assert_panel_unlocked(dev_priv, pipe); | ||
1606 | |||
1607 | if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) | ||
1608 | _chv_enable_pll(crtc, pipe_config); | ||
1592 | 1609 | ||
1593 | if (pipe != PIPE_A) { | 1610 | if (pipe != PIPE_A) { |
1594 | /* | 1611 | /* |
@@ -3198,12 +3215,12 @@ void intel_finish_reset(struct drm_device *dev) | |||
3198 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) | 3215 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) |
3199 | { | 3216 | { |
3200 | struct drm_device *dev = crtc->dev; | 3217 | struct drm_device *dev = crtc->dev; |
3201 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3202 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3218 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3219 | unsigned reset_counter; | ||
3203 | bool pending; | 3220 | bool pending; |
3204 | 3221 | ||
3205 | if (i915_reset_in_progress(&dev_priv->gpu_error) || | 3222 | reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error); |
3206 | intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) | 3223 | if (intel_crtc->reset_counter != reset_counter) |
3207 | return false; | 3224 | return false; |
3208 | 3225 | ||
3209 | spin_lock_irq(&dev->event_lock); | 3226 | spin_lock_irq(&dev->event_lock); |
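The pending-flip check above now samples the reset counter once and compares it with the value stashed on the crtc, presumably sufficient because a reset that is merely in progress already changes the counter. A toy illustration of that single-comparison idea; the counter encoding here is made up:

#include <stdbool.h>
#include <stdio.h>

/* Toy single-sample reset check: the counter changes when a reset starts,
 * so "counter differs from the value sampled at queue time" covers both
 * "a reset happened" and "a reset is underway" in one comparison. */
static unsigned int gpu_reset_counter;

struct toy_flip {
	unsigned int reset_counter;	/* sampled when the flip was queued */
};

static bool flip_still_valid(const struct toy_flip *flip)
{
	unsigned int now = gpu_reset_counter;	/* one read, one comparison */

	return flip->reset_counter == now;
}

int main(void)
{
	struct toy_flip flip = { .reset_counter = gpu_reset_counter };

	printf("before reset: valid=%d\n", flip_still_valid(&flip));
	gpu_reset_counter++;	/* a reset begins */
	printf("during reset: valid=%d\n", flip_still_valid(&flip));
	return 0;
}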
@@ -3805,9 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) | |||
3805 | intel_crtc->unpin_work = NULL; | 3822 | intel_crtc->unpin_work = NULL; |
3806 | 3823 | ||
3807 | if (work->event) | 3824 | if (work->event) |
3808 | drm_send_vblank_event(intel_crtc->base.dev, | 3825 | drm_crtc_send_vblank_event(&intel_crtc->base, work->event); |
3809 | intel_crtc->pipe, | ||
3810 | work->event); | ||
3811 | 3826 | ||
3812 | drm_crtc_vblank_put(&intel_crtc->base); | 3827 | drm_crtc_vblank_put(&intel_crtc->base); |
3813 | 3828 | ||
@@ -4088,12 +4103,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
4088 | I915_WRITE(FDI_RX_TUSIZE1(pipe), | 4103 | I915_WRITE(FDI_RX_TUSIZE1(pipe), |
4089 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); | 4104 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); |
4090 | 4105 | ||
4091 | /* | ||
4092 | * Sometimes spurious CPU pipe underruns happen during FDI | ||
4093 | * training, at least with VGA+HDMI cloning. Suppress them. | ||
4094 | */ | ||
4095 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); | ||
4096 | |||
4097 | /* For PCH output, training FDI link */ | 4106 | /* For PCH output, training FDI link */ |
4098 | dev_priv->display.fdi_link_train(crtc); | 4107 | dev_priv->display.fdi_link_train(crtc); |
4099 | 4108 | ||
@@ -4128,8 +4137,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
4128 | 4137 | ||
4129 | intel_fdi_normal_train(crtc); | 4138 | intel_fdi_normal_train(crtc); |
4130 | 4139 | ||
4131 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | ||
4132 | |||
4133 | /* For PCH DP, enable TRANS_DP_CTL */ | 4140 | /* For PCH DP, enable TRANS_DP_CTL */ |
4134 | if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { | 4141 | if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { |
4135 | const struct drm_display_mode *adjusted_mode = | 4142 | const struct drm_display_mode *adjusted_mode = |
@@ -4732,6 +4739,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
4732 | if (WARN_ON(intel_crtc->active)) | 4739 | if (WARN_ON(intel_crtc->active)) |
4733 | return; | 4740 | return; |
4734 | 4741 | ||
4742 | /* | ||
4743 | * Sometimes spurious CPU pipe underruns happen during FDI | ||
4744 | * training, at least with VGA+HDMI cloning. Suppress them. | ||
4745 | * | ||
4746 | * On ILK we get an occasional spurious CPU pipe underruns | ||
4747 | * between eDP port A enable and vdd enable. Also PCH port | ||
4748 | * enable seems to result in the occasional CPU pipe underrun. | ||
4749 | * | ||
4750 | * Spurious PCH underruns also occur during PCH enabling. | ||
4751 | */ | ||
4752 | if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv)) | ||
4753 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); | ||
4735 | if (intel_crtc->config->has_pch_encoder) | 4754 | if (intel_crtc->config->has_pch_encoder) |
4736 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); | 4755 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); |
4737 | 4756 | ||
@@ -4753,8 +4772,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
4753 | 4772 | ||
4754 | intel_crtc->active = true; | 4773 | intel_crtc->active = true; |
4755 | 4774 | ||
4756 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | ||
4757 | |||
4758 | for_each_encoder_on_crtc(dev, crtc, encoder) | 4775 | for_each_encoder_on_crtc(dev, crtc, encoder) |
4759 | if (encoder->pre_enable) | 4776 | if (encoder->pre_enable) |
4760 | encoder->pre_enable(encoder); | 4777 | encoder->pre_enable(encoder); |
@@ -4796,6 +4813,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
4796 | /* Must wait for vblank to avoid spurious PCH FIFO underruns */ | 4813 | /* Must wait for vblank to avoid spurious PCH FIFO underruns */ |
4797 | if (intel_crtc->config->has_pch_encoder) | 4814 | if (intel_crtc->config->has_pch_encoder) |
4798 | intel_wait_for_vblank(dev, pipe); | 4815 | intel_wait_for_vblank(dev, pipe); |
4816 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | ||
4799 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); | 4817 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); |
4800 | } | 4818 | } |
4801 | 4819 | ||
@@ -4948,8 +4966,15 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
4948 | struct intel_encoder *encoder; | 4966 | struct intel_encoder *encoder; |
4949 | int pipe = intel_crtc->pipe; | 4967 | int pipe = intel_crtc->pipe; |
4950 | 4968 | ||
4951 | if (intel_crtc->config->has_pch_encoder) | 4969 | /* |
4970 | * Sometimes spurious CPU pipe underruns happen when the | ||
4971 | * pipe is already disabled, but FDI RX/TX is still enabled. | ||
4972 | * Happens at least with VGA+HDMI cloning. Suppress them. | ||
4973 | */ | ||
4974 | if (intel_crtc->config->has_pch_encoder) { | ||
4975 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); | ||
4952 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); | 4976 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); |
4977 | } | ||
4953 | 4978 | ||
4954 | for_each_encoder_on_crtc(dev, crtc, encoder) | 4979 | for_each_encoder_on_crtc(dev, crtc, encoder) |
4955 | encoder->disable(encoder); | 4980 | encoder->disable(encoder); |
@@ -4957,22 +4982,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
4957 | drm_crtc_vblank_off(crtc); | 4982 | drm_crtc_vblank_off(crtc); |
4958 | assert_vblank_disabled(crtc); | 4983 | assert_vblank_disabled(crtc); |
4959 | 4984 | ||
4960 | /* | ||
4961 | * Sometimes spurious CPU pipe underruns happen when the | ||
4962 | * pipe is already disabled, but FDI RX/TX is still enabled. | ||
4963 | * Happens at least with VGA+HDMI cloning. Suppress them. | ||
4964 | */ | ||
4965 | if (intel_crtc->config->has_pch_encoder) | ||
4966 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); | ||
4967 | |||
4968 | intel_disable_pipe(intel_crtc); | 4985 | intel_disable_pipe(intel_crtc); |
4969 | 4986 | ||
4970 | ironlake_pfit_disable(intel_crtc, false); | 4987 | ironlake_pfit_disable(intel_crtc, false); |
4971 | 4988 | ||
4972 | if (intel_crtc->config->has_pch_encoder) { | 4989 | if (intel_crtc->config->has_pch_encoder) |
4973 | ironlake_fdi_disable(crtc); | 4990 | ironlake_fdi_disable(crtc); |
4974 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | ||
4975 | } | ||
4976 | 4991 | ||
4977 | for_each_encoder_on_crtc(dev, crtc, encoder) | 4992 | for_each_encoder_on_crtc(dev, crtc, encoder) |
4978 | if (encoder->post_disable) | 4993 | if (encoder->post_disable) |
@@ -5002,6 +5017,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
5002 | ironlake_fdi_pll_disable(intel_crtc); | 5017 | ironlake_fdi_pll_disable(intel_crtc); |
5003 | } | 5018 | } |
5004 | 5019 | ||
5020 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | ||
5005 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); | 5021 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); |
5006 | } | 5022 | } |
5007 | 5023 | ||
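The ironlake enable/disable hunks above move the FIFO-underrun toggles to the edges of the modeset sequence: reporting is masked before the pipe/FDI transition starts and unmasked only after the trailing vblank wait, so the spurious underruns fired mid-sequence are never logged. A minimal sketch of the resulting bracketing pattern, assuming the usual i915_drv.h/intel_drv.h context (example_crtc_sequence is a hypothetical wrapper; only the two reporting helpers are real driver functions):

	static void example_crtc_sequence(struct drm_i915_private *dev_priv,
					  enum pipe pipe, bool has_pch_encoder)
	{
		/* 1. mask CPU (and, with a PCH encoder, PCH) underrun reporting */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		if (has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

		/* 2. ...enable or disable the pipe and FDI link here... */

		/* 3. unmask only once the hardware has settled (after the vblank wait) */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		if (has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
	}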
@@ -5329,9 +5345,8 @@ static void intel_update_cdclk(struct drm_device *dev) | |||
5329 | intel_update_max_cdclk(dev); | 5345 | intel_update_max_cdclk(dev); |
5330 | } | 5346 | } |
5331 | 5347 | ||
5332 | static void broxton_set_cdclk(struct drm_device *dev, int frequency) | 5348 | static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency) |
5333 | { | 5349 | { |
5334 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5335 | uint32_t divider; | 5350 | uint32_t divider; |
5336 | uint32_t ratio; | 5351 | uint32_t ratio; |
5337 | uint32_t current_freq; | 5352 | uint32_t current_freq; |
@@ -5445,33 +5460,46 @@ static void broxton_set_cdclk(struct drm_device *dev, int frequency) | |||
5445 | return; | 5460 | return; |
5446 | } | 5461 | } |
5447 | 5462 | ||
5448 | intel_update_cdclk(dev); | 5463 | intel_update_cdclk(dev_priv->dev); |
5449 | } | 5464 | } |
5450 | 5465 | ||
5451 | void broxton_init_cdclk(struct drm_device *dev) | 5466 | static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv) |
5452 | { | 5467 | { |
5453 | struct drm_i915_private *dev_priv = dev->dev_private; | 5468 | if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE)) |
5454 | uint32_t val; | 5469 | return false; |
5455 | 5470 | ||
5456 | /* | 5471 | /* TODO: Check for a valid CDCLK rate */ |
5457 | * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT | 5472 | |
5458 | * or else the reset will hang because there is no PCH to respond. | 5473 | if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) { |
5459 | * Move the handshake programming to initialization sequence. | 5474 | DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n"); |
5460 | * Previously was left up to BIOS. | 5475 | |
5461 | */ | 5476 | return false; |
5462 | val = I915_READ(HSW_NDE_RSTWRN_OPT); | 5477 | } |
5463 | val &= ~RESET_PCH_HANDSHAKE_ENABLE; | 5478 | |
5464 | I915_WRITE(HSW_NDE_RSTWRN_OPT, val); | 5479 | if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) { |
5480 | DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n"); | ||
5481 | |||
5482 | return false; | ||
5483 | } | ||
5484 | |||
5485 | return true; | ||
5486 | } | ||
5465 | 5487 | ||
5466 | /* Enable PG1 for cdclk */ | 5488 | bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv) |
5467 | intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); | 5489 | { |
5490 | return broxton_cdclk_is_enabled(dev_priv); | ||
5491 | } | ||
5468 | 5492 | ||
5493 | void broxton_init_cdclk(struct drm_i915_private *dev_priv) | ||
5494 | { | ||
5469 | /* check if cd clock is enabled */ | 5495 | /* check if cd clock is enabled */ |
5470 | if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) { | 5496 | if (broxton_cdclk_is_enabled(dev_priv)) { |
5471 | DRM_DEBUG_KMS("Display already initialized\n"); | 5497 | DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n"); |
5472 | return; | 5498 | return; |
5473 | } | 5499 | } |
5474 | 5500 | ||
5501 | DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n"); | ||
5502 | |||
5475 | /* | 5503 | /* |
5476 | * FIXME: | 5504 | * FIXME: |
5477 | * - The initial CDCLK needs to be read from VBT. | 5505 | * - The initial CDCLK needs to be read from VBT. |
@@ -5479,7 +5507,7 @@ void broxton_init_cdclk(struct drm_device *dev) | |||
5479 | * - check if setting the max (or any) cdclk freq is really necessary | 5507 | * - check if setting the max (or any) cdclk freq is really necessary |
5480 | * here, it belongs to modeset time | 5508 | * here, it belongs to modeset time |
5481 | */ | 5509 | */ |
5482 | broxton_set_cdclk(dev, 624000); | 5510 | broxton_set_cdclk(dev_priv, 624000); |
5483 | 5511 | ||
5484 | I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); | 5512 | I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); |
5485 | POSTING_READ(DBUF_CTL); | 5513 | POSTING_READ(DBUF_CTL); |
@@ -5490,10 +5518,8 @@ void broxton_init_cdclk(struct drm_device *dev) | |||
5490 | DRM_ERROR("DBuf power enable timeout!\n"); | 5518 | DRM_ERROR("DBuf power enable timeout!\n"); |
5491 | } | 5519 | } |
5492 | 5520 | ||
5493 | void broxton_uninit_cdclk(struct drm_device *dev) | 5521 | void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) |
5494 | { | 5522 | { |
5495 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5496 | |||
5497 | I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); | 5523 | I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); |
5498 | POSTING_READ(DBUF_CTL); | 5524 | POSTING_READ(DBUF_CTL); |
5499 | 5525 | ||
@@ -5503,9 +5529,7 @@ void broxton_uninit_cdclk(struct drm_device *dev) | |||
5503 | DRM_ERROR("DBuf power disable timeout!\n"); | 5529 | DRM_ERROR("DBuf power disable timeout!\n"); |
5504 | 5530 | ||
5505 | /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ | 5531 | /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ |
5506 | broxton_set_cdclk(dev, 19200); | 5532 | broxton_set_cdclk(dev_priv, 19200); |
5507 | |||
5508 | intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); | ||
5509 | } | 5533 | } |
5510 | 5534 | ||
5511 | static const struct skl_cdclk_entry { | 5535 | static const struct skl_cdclk_entry { |
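With broxton_set_cdclk()/broxton_init_cdclk()/broxton_uninit_cdclk() converted to take dev_priv, the new broxton_cdclk_is_enabled() check lets the init path skip reprogramming when the BIOS already left CDCLK and the DBUF power well in a usable state, and broxton_cdclk_verify_state() exposes the same check to callers. A hedged sketch of how a resume path might compose the two (example_bxt_display_init is illustrative; the real call sites are the BXT display-core init/uninit paths declared later in this diff):

	static void example_bxt_display_init(struct drm_i915_private *dev_priv,
					     bool resume)
	{
		/* reprograms CDCLK/DBUF only if the firmware-provided state is unusable */
		broxton_init_cdclk(dev_priv);

		/* on resume, cross-check that nothing clobbered the CDCLK/DBUF state */
		if (resume)
			WARN_ON(!broxton_cdclk_verify_state(dev_priv));
	}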
@@ -6072,14 +6096,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) | |||
6072 | if (encoder->pre_pll_enable) | 6096 | if (encoder->pre_pll_enable) |
6073 | encoder->pre_pll_enable(encoder); | 6097 | encoder->pre_pll_enable(encoder); |
6074 | 6098 | ||
6075 | if (!intel_crtc->config->has_dsi_encoder) { | 6099 | if (IS_CHERRYVIEW(dev)) { |
6076 | if (IS_CHERRYVIEW(dev)) { | 6100 | chv_prepare_pll(intel_crtc, intel_crtc->config); |
6077 | chv_prepare_pll(intel_crtc, intel_crtc->config); | 6101 | chv_enable_pll(intel_crtc, intel_crtc->config); |
6078 | chv_enable_pll(intel_crtc, intel_crtc->config); | 6102 | } else { |
6079 | } else { | 6103 | vlv_prepare_pll(intel_crtc, intel_crtc->config); |
6080 | vlv_prepare_pll(intel_crtc, intel_crtc->config); | 6104 | vlv_enable_pll(intel_crtc, intel_crtc->config); |
6081 | vlv_enable_pll(intel_crtc, intel_crtc->config); | ||
6082 | } | ||
6083 | } | 6105 | } |
6084 | 6106 | ||
6085 | for_each_encoder_on_crtc(dev, crtc, encoder) | 6107 | for_each_encoder_on_crtc(dev, crtc, encoder) |
@@ -6117,7 +6139,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) | |||
6117 | struct intel_encoder *encoder; | 6139 | struct intel_encoder *encoder; |
6118 | struct intel_crtc_state *pipe_config = | 6140 | struct intel_crtc_state *pipe_config = |
6119 | to_intel_crtc_state(crtc->state); | 6141 | to_intel_crtc_state(crtc->state); |
6120 | int pipe = intel_crtc->pipe; | 6142 | enum pipe pipe = intel_crtc->pipe; |
6121 | 6143 | ||
6122 | if (WARN_ON(intel_crtc->active)) | 6144 | if (WARN_ON(intel_crtc->active)) |
6123 | return; | 6145 | return; |
@@ -7173,11 +7195,15 @@ static void vlv_compute_dpll(struct intel_crtc *crtc, | |||
7173 | struct intel_crtc_state *pipe_config) | 7195 | struct intel_crtc_state *pipe_config) |
7174 | { | 7196 | { |
7175 | pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | | 7197 | pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | |
7176 | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | | 7198 | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; |
7177 | DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV; | ||
7178 | if (crtc->pipe != PIPE_A) | 7199 | if (crtc->pipe != PIPE_A) |
7179 | pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; | 7200 | pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; |
7180 | 7201 | ||
7202 | /* DPLL not used with DSI, but still need the rest set up */ | ||
7203 | if (!pipe_config->has_dsi_encoder) | ||
7204 | pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | | ||
7205 | DPLL_EXT_BUFFER_ENABLE_VLV; | ||
7206 | |||
7181 | pipe_config->dpll_hw_state.dpll_md = | 7207 | pipe_config->dpll_hw_state.dpll_md = |
7182 | (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; | 7208 | (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; |
7183 | } | 7209 | } |
@@ -7186,11 +7212,14 @@ static void chv_compute_dpll(struct intel_crtc *crtc, | |||
7186 | struct intel_crtc_state *pipe_config) | 7212 | struct intel_crtc_state *pipe_config) |
7187 | { | 7213 | { |
7188 | pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | | 7214 | pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | |
7189 | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | | 7215 | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; |
7190 | DPLL_VCO_ENABLE; | ||
7191 | if (crtc->pipe != PIPE_A) | 7216 | if (crtc->pipe != PIPE_A) |
7192 | pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; | 7217 | pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; |
7193 | 7218 | ||
7219 | /* DPLL not used with DSI, but still need the rest set up */ | ||
7220 | if (!pipe_config->has_dsi_encoder) | ||
7221 | pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; | ||
7222 | |||
7194 | pipe_config->dpll_hw_state.dpll_md = | 7223 | pipe_config->dpll_hw_state.dpll_md = |
7195 | (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; | 7224 | (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; |
7196 | } | 7225 | } |
@@ -7200,11 +7229,20 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, | |||
7200 | { | 7229 | { |
7201 | struct drm_device *dev = crtc->base.dev; | 7230 | struct drm_device *dev = crtc->base.dev; |
7202 | struct drm_i915_private *dev_priv = dev->dev_private; | 7231 | struct drm_i915_private *dev_priv = dev->dev_private; |
7203 | int pipe = crtc->pipe; | 7232 | enum pipe pipe = crtc->pipe; |
7204 | u32 mdiv; | 7233 | u32 mdiv; |
7205 | u32 bestn, bestm1, bestm2, bestp1, bestp2; | 7234 | u32 bestn, bestm1, bestm2, bestp1, bestp2; |
7206 | u32 coreclk, reg_val; | 7235 | u32 coreclk, reg_val; |
7207 | 7236 | ||
7237 | /* Enable Refclk */ | ||
7238 | I915_WRITE(DPLL(pipe), | ||
7239 | pipe_config->dpll_hw_state.dpll & | ||
7240 | ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); | ||
7241 | |||
7242 | /* No need to actually set up the DPLL with DSI */ | ||
7243 | if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) | ||
7244 | return; | ||
7245 | |||
7208 | mutex_lock(&dev_priv->sb_lock); | 7246 | mutex_lock(&dev_priv->sb_lock); |
7209 | 7247 | ||
7210 | bestn = pipe_config->dpll.n; | 7248 | bestn = pipe_config->dpll.n; |
@@ -7291,14 +7329,21 @@ static void chv_prepare_pll(struct intel_crtc *crtc, | |||
7291 | { | 7329 | { |
7292 | struct drm_device *dev = crtc->base.dev; | 7330 | struct drm_device *dev = crtc->base.dev; |
7293 | struct drm_i915_private *dev_priv = dev->dev_private; | 7331 | struct drm_i915_private *dev_priv = dev->dev_private; |
7294 | int pipe = crtc->pipe; | 7332 | enum pipe pipe = crtc->pipe; |
7295 | i915_reg_t dpll_reg = DPLL(crtc->pipe); | ||
7296 | enum dpio_channel port = vlv_pipe_to_channel(pipe); | 7333 | enum dpio_channel port = vlv_pipe_to_channel(pipe); |
7297 | u32 loopfilter, tribuf_calcntr; | 7334 | u32 loopfilter, tribuf_calcntr; |
7298 | u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; | 7335 | u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; |
7299 | u32 dpio_val; | 7336 | u32 dpio_val; |
7300 | int vco; | 7337 | int vco; |
7301 | 7338 | ||
7339 | /* Enable Refclk and SSC */ | ||
7340 | I915_WRITE(DPLL(pipe), | ||
7341 | pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); | ||
7342 | |||
7343 | /* No need to actually set up the DPLL with DSI */ | ||
7344 | if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) | ||
7345 | return; | ||
7346 | |||
7302 | bestn = pipe_config->dpll.n; | 7347 | bestn = pipe_config->dpll.n; |
7303 | bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; | 7348 | bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; |
7304 | bestm1 = pipe_config->dpll.m1; | 7349 | bestm1 = pipe_config->dpll.m1; |
@@ -7309,12 +7354,6 @@ static void chv_prepare_pll(struct intel_crtc *crtc, | |||
7309 | dpio_val = 0; | 7354 | dpio_val = 0; |
7310 | loopfilter = 0; | 7355 | loopfilter = 0; |
7311 | 7356 | ||
7312 | /* | ||
7313 | * Enable Refclk and SSC | ||
7314 | */ | ||
7315 | I915_WRITE(dpll_reg, | ||
7316 | pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); | ||
7317 | |||
7318 | mutex_lock(&dev_priv->sb_lock); | 7357 | mutex_lock(&dev_priv->sb_lock); |
7319 | 7358 | ||
7320 | /* p1 and p2 divider */ | 7359 | /* p1 and p2 divider */ |
@@ -7929,9 +7968,6 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc, | |||
7929 | memset(&crtc_state->dpll_hw_state, 0, | 7968 | memset(&crtc_state->dpll_hw_state, 0, |
7930 | sizeof(crtc_state->dpll_hw_state)); | 7969 | sizeof(crtc_state->dpll_hw_state)); |
7931 | 7970 | ||
7932 | if (crtc_state->has_dsi_encoder) | ||
7933 | return 0; | ||
7934 | |||
7935 | if (!crtc_state->clock_set && | 7971 | if (!crtc_state->clock_set && |
7936 | !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, | 7972 | !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, |
7937 | refclk, NULL, &crtc_state->dpll)) { | 7973 | refclk, NULL, &crtc_state->dpll)) { |
@@ -7953,9 +7989,6 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, | |||
7953 | memset(&crtc_state->dpll_hw_state, 0, | 7989 | memset(&crtc_state->dpll_hw_state, 0, |
7954 | sizeof(crtc_state->dpll_hw_state)); | 7990 | sizeof(crtc_state->dpll_hw_state)); |
7955 | 7991 | ||
7956 | if (crtc_state->has_dsi_encoder) | ||
7957 | return 0; | ||
7958 | |||
7959 | if (!crtc_state->clock_set && | 7992 | if (!crtc_state->clock_set && |
7960 | !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, | 7993 | !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, |
7961 | refclk, NULL, &crtc_state->dpll)) { | 7994 | refclk, NULL, &crtc_state->dpll)) { |
@@ -8008,8 +8041,8 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, | |||
8008 | u32 mdiv; | 8041 | u32 mdiv; |
8009 | int refclk = 100000; | 8042 | int refclk = 100000; |
8010 | 8043 | ||
8011 | /* In case of MIPI DPLL will not even be used */ | 8044 | /* In case of DSI, DPLL will not be used */ |
8012 | if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)) | 8045 | if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) |
8013 | return; | 8046 | return; |
8014 | 8047 | ||
8015 | mutex_lock(&dev_priv->sb_lock); | 8048 | mutex_lock(&dev_priv->sb_lock); |
@@ -8105,6 +8138,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, | |||
8105 | u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; | 8138 | u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; |
8106 | int refclk = 100000; | 8139 | int refclk = 100000; |
8107 | 8140 | ||
8141 | /* In case of DSI, DPLL will not be used */ | ||
8142 | if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) | ||
8143 | return; | ||
8144 | |||
8108 | mutex_lock(&dev_priv->sb_lock); | 8145 | mutex_lock(&dev_priv->sb_lock); |
8109 | cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); | 8146 | cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); |
8110 | pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); | 8147 | pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); |
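The vlv/chv DPLL hunks above encode "DSI does not use the DPLL" in the precomputed state itself: the compute helpers leave DPLL_VCO_ENABLE (and the external buffer bit on VLV) out when a DSI encoder is present, the prepare helpers still write the refclk bits but return before touching the dividers, and the clock readout bails out on the same condition. A condensed sketch of the guard, with an illustrative signature (the real vlv_prepare_pll() takes the crtc and derives dev_priv/pipe from it):

	static void example_vlv_prepare_pll(struct drm_i915_private *dev_priv,
					    enum pipe pipe,
					    const struct intel_crtc_state *config)
	{
		/* refclk (plus SSC on CHV) must be enabled even for DSI */
		I915_WRITE(DPLL(pipe),
			   config->dpll_hw_state.dpll &
			   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

		/* no VCO enable means a DSI pipe: nothing else to program */
		if ((config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
			return;

		/* ...program the M/N/P dividers over the sideband here... */
	}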
@@ -9533,7 +9570,7 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) | |||
9533 | to_intel_atomic_state(old_state); | 9570 | to_intel_atomic_state(old_state); |
9534 | unsigned int req_cdclk = old_intel_state->dev_cdclk; | 9571 | unsigned int req_cdclk = old_intel_state->dev_cdclk; |
9535 | 9572 | ||
9536 | broxton_set_cdclk(dev, req_cdclk); | 9573 | broxton_set_cdclk(to_i915(dev), req_cdclk); |
9537 | } | 9574 | } |
9538 | 9575 | ||
9539 | /* compute the max rate for new configuration */ | 9576 | /* compute the max rate for new configuration */ |
@@ -10903,9 +10940,10 @@ static bool page_flip_finished(struct intel_crtc *crtc) | |||
10903 | { | 10940 | { |
10904 | struct drm_device *dev = crtc->base.dev; | 10941 | struct drm_device *dev = crtc->base.dev; |
10905 | struct drm_i915_private *dev_priv = dev->dev_private; | 10942 | struct drm_i915_private *dev_priv = dev->dev_private; |
10943 | unsigned reset_counter; | ||
10906 | 10944 | ||
10907 | if (i915_reset_in_progress(&dev_priv->gpu_error) || | 10945 | reset_counter = i915_reset_counter(&dev_priv->gpu_error); |
10908 | crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) | 10946 | if (crtc->reset_counter != reset_counter) |
10909 | return true; | 10947 | return true; |
10910 | 10948 | ||
10911 | /* | 10949 | /* |
@@ -11359,7 +11397,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work) | |||
11359 | 11397 | ||
11360 | if (mmio_flip->req) { | 11398 | if (mmio_flip->req) { |
11361 | WARN_ON(__i915_wait_request(mmio_flip->req, | 11399 | WARN_ON(__i915_wait_request(mmio_flip->req, |
11362 | mmio_flip->crtc->reset_counter, | ||
11363 | false, NULL, | 11400 | false, NULL, |
11364 | &mmio_flip->i915->rps.mmioflips)); | 11401 | &mmio_flip->i915->rps.mmioflips)); |
11365 | i915_gem_request_unreference__unlocked(mmio_flip->req); | 11402 | i915_gem_request_unreference__unlocked(mmio_flip->req); |
@@ -11567,8 +11604,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
11567 | if (ret) | 11604 | if (ret) |
11568 | goto cleanup; | 11605 | goto cleanup; |
11569 | 11606 | ||
11607 | intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error); | ||
11608 | if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) { | ||
11609 | ret = -EIO; | ||
11610 | goto cleanup; | ||
11611 | } | ||
11612 | |||
11570 | atomic_inc(&intel_crtc->unpin_work_count); | 11613 | atomic_inc(&intel_crtc->unpin_work_count); |
11571 | intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
11572 | 11614 | ||
11573 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) | 11615 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) |
11574 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; | 11616 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; |
@@ -11654,7 +11696,7 @@ cleanup_unpin: | |||
11654 | intel_unpin_fb_obj(fb, crtc->primary->state->rotation); | 11696 | intel_unpin_fb_obj(fb, crtc->primary->state->rotation); |
11655 | cleanup_pending: | 11697 | cleanup_pending: |
11656 | if (!IS_ERR_OR_NULL(request)) | 11698 | if (!IS_ERR_OR_NULL(request)) |
11657 | i915_gem_request_cancel(request); | 11699 | i915_add_request_no_flush(request); |
11658 | atomic_dec(&intel_crtc->unpin_work_count); | 11700 | atomic_dec(&intel_crtc->unpin_work_count); |
11659 | mutex_unlock(&dev->struct_mutex); | 11701 | mutex_unlock(&dev->struct_mutex); |
11660 | cleanup: | 11702 | cleanup: |
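The page-flip hunks above switch to sampling the reset counter once, via i915_reset_counter(), when the flip is queued: a flip queued while a reset is pending (or after the GPU is wedged) now fails with -EIO up front, and page_flip_finished() only compares the sampled value against the current one. A stripped-down sketch of the scheme (both example_* functions are illustrative; the helpers and the reset_counter field are the ones used in the hunks):

	static int example_queue_flip(struct drm_i915_private *dev_priv,
				      struct intel_crtc *intel_crtc)
	{
		/* snapshot the counter when the flip is queued... */
		intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
		if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter))
			return -EIO;

		/* ...queue the flip as before... */
		return 0;
	}

	static bool example_flip_finished(struct drm_i915_private *dev_priv,
					  struct intel_crtc *intel_crtc)
	{
		/* ...and treat any later counter change as "flip completed" */
		return intel_crtc->reset_counter !=
			i915_reset_counter(&dev_priv->gpu_error);
	}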
@@ -11704,7 +11746,7 @@ retry: | |||
11704 | 11746 | ||
11705 | if (ret == 0 && event) { | 11747 | if (ret == 0 && event) { |
11706 | spin_lock_irq(&dev->event_lock); | 11748 | spin_lock_irq(&dev->event_lock); |
11707 | drm_send_vblank_event(dev, pipe, event); | 11749 | drm_crtc_send_vblank_event(crtc, event); |
11708 | spin_unlock_irq(&dev->event_lock); | 11750 | spin_unlock_irq(&dev->event_lock); |
11709 | } | 11751 | } |
11710 | } | 11752 | } |
@@ -12686,7 +12728,7 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
12686 | PIPE_CONF_CHECK_X(gmch_pfit.control); | 12728 | PIPE_CONF_CHECK_X(gmch_pfit.control); |
12687 | /* pfit ratios are autocomputed by the hw on gen4+ */ | 12729 | /* pfit ratios are autocomputed by the hw on gen4+ */ |
12688 | if (INTEL_INFO(dev)->gen < 4) | 12730 | if (INTEL_INFO(dev)->gen < 4) |
12689 | PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); | 12731 | PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); |
12690 | PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); | 12732 | PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); |
12691 | 12733 | ||
12692 | if (!adjust) { | 12734 | if (!adjust) { |
@@ -12721,6 +12763,9 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
12721 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); | 12763 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); |
12722 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); | 12764 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); |
12723 | 12765 | ||
12766 | PIPE_CONF_CHECK_X(dsi_pll.ctrl); | ||
12767 | PIPE_CONF_CHECK_X(dsi_pll.div); | ||
12768 | |||
12724 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) | 12769 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) |
12725 | PIPE_CONF_CHECK_I(pipe_bpp); | 12770 | PIPE_CONF_CHECK_I(pipe_bpp); |
12726 | 12771 | ||
@@ -13401,6 +13446,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, | |||
13401 | } | 13446 | } |
13402 | 13447 | ||
13403 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 13448 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
13449 | if (state->legacy_cursor_update) | ||
13450 | continue; | ||
13451 | |||
13404 | ret = intel_crtc_wait_for_pending_flips(crtc); | 13452 | ret = intel_crtc_wait_for_pending_flips(crtc); |
13405 | if (ret) | 13453 | if (ret) |
13406 | return ret; | 13454 | return ret; |
@@ -13414,12 +13462,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, | |||
13414 | return ret; | 13462 | return ret; |
13415 | 13463 | ||
13416 | ret = drm_atomic_helper_prepare_planes(dev, state); | 13464 | ret = drm_atomic_helper_prepare_planes(dev, state); |
13417 | if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) { | 13465 | mutex_unlock(&dev->struct_mutex); |
13418 | u32 reset_counter; | ||
13419 | |||
13420 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
13421 | mutex_unlock(&dev->struct_mutex); | ||
13422 | 13466 | ||
13467 | if (!ret && !async) { | ||
13423 | for_each_plane_in_state(state, plane, plane_state, i) { | 13468 | for_each_plane_in_state(state, plane, plane_state, i) { |
13424 | struct intel_plane_state *intel_plane_state = | 13469 | struct intel_plane_state *intel_plane_state = |
13425 | to_intel_plane_state(plane_state); | 13470 | to_intel_plane_state(plane_state); |
@@ -13428,25 +13473,18 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, | |||
13428 | continue; | 13473 | continue; |
13429 | 13474 | ||
13430 | ret = __i915_wait_request(intel_plane_state->wait_req, | 13475 | ret = __i915_wait_request(intel_plane_state->wait_req, |
13431 | reset_counter, true, | 13476 | true, NULL, NULL); |
13432 | NULL, NULL); | 13477 | if (ret) { |
13433 | 13478 | /* Any hang should be swallowed by the wait */ | |
13434 | /* Swallow -EIO errors to allow updates during hw lockup. */ | 13479 | WARN_ON(ret == -EIO); |
13435 | if (ret == -EIO) | 13480 | mutex_lock(&dev->struct_mutex); |
13436 | ret = 0; | 13481 | drm_atomic_helper_cleanup_planes(dev, state); |
13437 | 13482 | mutex_unlock(&dev->struct_mutex); | |
13438 | if (ret) | ||
13439 | break; | 13483 | break; |
13484 | } | ||
13440 | } | 13485 | } |
13441 | |||
13442 | if (!ret) | ||
13443 | return 0; | ||
13444 | |||
13445 | mutex_lock(&dev->struct_mutex); | ||
13446 | drm_atomic_helper_cleanup_planes(dev, state); | ||
13447 | } | 13486 | } |
13448 | 13487 | ||
13449 | mutex_unlock(&dev->struct_mutex); | ||
13450 | return ret; | 13488 | return ret; |
13451 | } | 13489 | } |
13452 | 13490 | ||
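intel_atomic_prepare_commit() no longer passes a reset counter to __i915_wait_request() and no longer converts -EIO into success: GPU hangs are expected to be swallowed inside the wait, so a leaking -EIO is treated as a bug. A hedged sketch of the per-plane wait as it now behaves (example_wait_for_plane is illustrative; the call and the WARN mirror the hunk above):

	static int example_wait_for_plane(struct intel_plane_state *intel_plane_state)
	{
		int ret;

		if (!intel_plane_state->wait_req)
			return 0;

		ret = __i915_wait_request(intel_plane_state->wait_req,
					  true, NULL, NULL);
		/* any GPU hang should have been swallowed by the wait itself */
		WARN_ON(ret == -EIO);
		return ret;
	}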
@@ -13488,7 +13526,7 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev, | |||
13488 | drm_crtc_vblank_count(crtc), | 13526 | drm_crtc_vblank_count(crtc), |
13489 | msecs_to_jiffies(50)); | 13527 | msecs_to_jiffies(50)); |
13490 | 13528 | ||
13491 | WARN_ON(!lret); | 13529 | WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe)); |
13492 | 13530 | ||
13493 | drm_crtc_vblank_put(crtc); | 13531 | drm_crtc_vblank_put(crtc); |
13494 | } | 13532 | } |
@@ -13790,10 +13828,11 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
13790 | */ | 13828 | */ |
13791 | if (needs_modeset(crtc_state)) | 13829 | if (needs_modeset(crtc_state)) |
13792 | ret = i915_gem_object_wait_rendering(old_obj, true); | 13830 | ret = i915_gem_object_wait_rendering(old_obj, true); |
13793 | 13831 | if (ret) { | |
13794 | /* Swallow -EIO errors to allow updates during hw lockup. */ | 13832 | /* GPU hangs should have been swallowed by the wait */ |
13795 | if (ret && ret != -EIO) | 13833 | WARN_ON(ret == -EIO); |
13796 | return ret; | 13834 | return ret; |
13835 | } | ||
13797 | } | 13836 | } |
13798 | 13837 | ||
13799 | /* For framebuffer backed by dmabuf, wait for fence */ | 13838 | /* For framebuffer backed by dmabuf, wait for fence */ |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index da0c3d29fda8..a3fc49430c26 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -2215,6 +2215,15 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp) | |||
2215 | POSTING_READ(DP_A); | 2215 | POSTING_READ(DP_A); |
2216 | udelay(500); | 2216 | udelay(500); |
2217 | 2217 | ||
2218 | /* | ||
2219 | * [DevILK] Work around required when enabling DP PLL | ||
2220 | * while a pipe is enabled going to FDI: | ||
2221 | * 1. Wait for the start of vertical blank on the enabled pipe going to FDI | ||
2222 | * 2. Program DP PLL enable | ||
2223 | */ | ||
2224 | if (IS_GEN5(dev_priv)) | ||
2225 | intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe); | ||
2226 | |||
2218 | intel_dp->DP |= DP_PLL_ENABLE; | 2227 | intel_dp->DP |= DP_PLL_ENABLE; |
2219 | 2228 | ||
2220 | I915_WRITE(DP_A, intel_dp->DP); | 2229 | I915_WRITE(DP_A, intel_dp->DP); |
@@ -2630,7 +2639,6 @@ static void intel_enable_dp(struct intel_encoder *encoder) | |||
2630 | struct drm_i915_private *dev_priv = dev->dev_private; | 2639 | struct drm_i915_private *dev_priv = dev->dev_private; |
2631 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | 2640 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); |
2632 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 2641 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
2633 | enum port port = dp_to_dig_port(intel_dp)->port; | ||
2634 | enum pipe pipe = crtc->pipe; | 2642 | enum pipe pipe = crtc->pipe; |
2635 | 2643 | ||
2636 | if (WARN_ON(dp_reg & DP_PORT_EN)) | 2644 | if (WARN_ON(dp_reg & DP_PORT_EN)) |
@@ -2641,35 +2649,12 @@ static void intel_enable_dp(struct intel_encoder *encoder) | |||
2641 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) | 2649 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
2642 | vlv_init_panel_power_sequencer(intel_dp); | 2650 | vlv_init_panel_power_sequencer(intel_dp); |
2643 | 2651 | ||
2644 | /* | ||
2645 | * We get an occasional spurious underrun between the port | ||
2646 | * enable and vdd enable, when enabling port A eDP. | ||
2647 | * | ||
2648 | * FIXME: Not sure if this applies to (PCH) port D eDP as well | ||
2649 | */ | ||
2650 | if (port == PORT_A) | ||
2651 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); | ||
2652 | |||
2653 | intel_dp_enable_port(intel_dp); | 2652 | intel_dp_enable_port(intel_dp); |
2654 | 2653 | ||
2655 | if (port == PORT_A && IS_GEN5(dev_priv)) { | ||
2656 | /* | ||
2657 | * Underrun reporting for the other pipe was disabled in | ||
2658 | * g4x_pre_enable_dp(). The eDP PLL and port have now been | ||
2659 | * enabled, so it's now safe to re-enable underrun reporting. | ||
2660 | */ | ||
2661 | intel_wait_for_vblank_if_active(dev_priv->dev, !pipe); | ||
2662 | intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true); | ||
2663 | intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true); | ||
2664 | } | ||
2665 | |||
2666 | edp_panel_vdd_on(intel_dp); | 2654 | edp_panel_vdd_on(intel_dp); |
2667 | edp_panel_on(intel_dp); | 2655 | edp_panel_on(intel_dp); |
2668 | edp_panel_vdd_off(intel_dp, true); | 2656 | edp_panel_vdd_off(intel_dp, true); |
2669 | 2657 | ||
2670 | if (port == PORT_A) | ||
2671 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | ||
2672 | |||
2673 | pps_unlock(intel_dp); | 2658 | pps_unlock(intel_dp); |
2674 | 2659 | ||
2675 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { | 2660 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
@@ -2711,26 +2696,11 @@ static void vlv_enable_dp(struct intel_encoder *encoder) | |||
2711 | 2696 | ||
2712 | static void g4x_pre_enable_dp(struct intel_encoder *encoder) | 2697 | static void g4x_pre_enable_dp(struct intel_encoder *encoder) |
2713 | { | 2698 | { |
2714 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
2715 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 2699 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
2716 | enum port port = dp_to_dig_port(intel_dp)->port; | 2700 | enum port port = dp_to_dig_port(intel_dp)->port; |
2717 | enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; | ||
2718 | 2701 | ||
2719 | intel_dp_prepare(encoder); | 2702 | intel_dp_prepare(encoder); |
2720 | 2703 | ||
2721 | if (port == PORT_A && IS_GEN5(dev_priv)) { | ||
2722 | /* | ||
2723 | * We get FIFO underruns on the other pipe when | ||
2724 | * enabling the CPU eDP PLL, and when enabling CPU | ||
2725 | * eDP port. We could potentially avoid the PLL | ||
2726 | * underrun with a vblank wait just prior to enabling | ||
2727 | * the PLL, but that doesn't appear to help the port | ||
2728 | * enable case. Just sweep it all under the rug. | ||
2729 | */ | ||
2730 | intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false); | ||
2731 | intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false); | ||
2732 | } | ||
2733 | |||
2734 | /* Only ilk+ has port A */ | 2704 | /* Only ilk+ has port A */ |
2735 | if (port == PORT_A) | 2705 | if (port == PORT_A) |
2736 | ironlake_edp_pll_on(intel_dp); | 2706 | ironlake_edp_pll_on(intel_dp); |
@@ -3806,7 +3776,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
3806 | * downstream port information. So, an early return here saves | 3776 | * downstream port information. So, an early return here saves |
3807 | * time from performing other operations which are not required. | 3777 | * time from performing other operations which are not required. |
3808 | */ | 3778 | */ |
3809 | if (!intel_dp->sink_count) | 3779 | if (!is_edp(intel_dp) && !intel_dp->sink_count) |
3810 | return false; | 3780 | return false; |
3811 | 3781 | ||
3812 | /* Check if the panel supports PSR */ | 3782 | /* Check if the panel supports PSR */ |
@@ -4339,6 +4309,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) | |||
4339 | if (!intel_dp_get_dpcd(intel_dp)) | 4309 | if (!intel_dp_get_dpcd(intel_dp)) |
4340 | return connector_status_disconnected; | 4310 | return connector_status_disconnected; |
4341 | 4311 | ||
4312 | if (is_edp(intel_dp)) | ||
4313 | return connector_status_connected; | ||
4314 | |||
4342 | /* if there's no downstream port, we're done */ | 4315 | /* if there's no downstream port, we're done */ |
4343 | if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) | 4316 | if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) |
4344 | return connector_status_connected; | 4317 | return connector_status_connected; |
@@ -4608,6 +4581,15 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) | |||
4608 | intel_dp->compliance_test_type = 0; | 4581 | intel_dp->compliance_test_type = 0; |
4609 | intel_dp->compliance_test_data = 0; | 4582 | intel_dp->compliance_test_data = 0; |
4610 | 4583 | ||
4584 | if (intel_dp->is_mst) { | ||
4585 | DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", | ||
4586 | intel_dp->is_mst, | ||
4587 | intel_dp->mst_mgr.mst_state); | ||
4588 | intel_dp->is_mst = false; | ||
4589 | drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, | ||
4590 | intel_dp->is_mst); | ||
4591 | } | ||
4592 | |||
4611 | goto out; | 4593 | goto out; |
4612 | } | 4594 | } |
4613 | 4595 | ||
@@ -4665,20 +4647,9 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) | |||
4665 | } | 4647 | } |
4666 | 4648 | ||
4667 | out: | 4649 | out: |
4668 | if (status != connector_status_connected) { | 4650 | if ((status != connector_status_connected) && |
4651 | (intel_dp->is_mst == false)) | ||
4669 | intel_dp_unset_edid(intel_dp); | 4652 | intel_dp_unset_edid(intel_dp); |
4670 | /* | ||
4671 | * If we were in MST mode, and device is not there, | ||
4672 | * get out of MST mode | ||
4673 | */ | ||
4674 | if (intel_dp->is_mst) { | ||
4675 | DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", | ||
4676 | intel_dp->is_mst, intel_dp->mst_mgr.mst_state); | ||
4677 | intel_dp->is_mst = false; | ||
4678 | drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, | ||
4679 | intel_dp->is_mst); | ||
4680 | } | ||
4681 | } | ||
4682 | 4653 | ||
4683 | intel_display_power_put(to_i915(dev), power_domain); | 4654 | intel_display_power_put(to_i915(dev), power_domain); |
4684 | return; | 4655 | return; |
@@ -4851,6 +4822,11 @@ intel_dp_set_property(struct drm_connector *connector, | |||
4851 | DRM_DEBUG_KMS("no scaling not supported\n"); | 4822 | DRM_DEBUG_KMS("no scaling not supported\n"); |
4852 | return -EINVAL; | 4823 | return -EINVAL; |
4853 | } | 4824 | } |
4825 | if (HAS_GMCH_DISPLAY(dev_priv) && | ||
4826 | val == DRM_MODE_SCALE_CENTER) { | ||
4827 | DRM_DEBUG_KMS("centering not supported\n"); | ||
4828 | return -EINVAL; | ||
4829 | } | ||
4854 | 4830 | ||
4855 | if (intel_connector->panel.fitting_mode == val) { | 4831 | if (intel_connector->panel.fitting_mode == val) { |
4856 | /* the eDP scaling property is not changed */ | 4832 | /* the eDP scaling property is not changed */ |
@@ -4914,7 +4890,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
4914 | kfree(intel_dig_port); | 4890 | kfree(intel_dig_port); |
4915 | } | 4891 | } |
4916 | 4892 | ||
4917 | static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) | 4893 | void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) |
4918 | { | 4894 | { |
4919 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); | 4895 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
4920 | 4896 | ||
@@ -4956,7 +4932,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) | |||
4956 | edp_panel_vdd_schedule_off(intel_dp); | 4932 | edp_panel_vdd_schedule_off(intel_dp); |
4957 | } | 4933 | } |
4958 | 4934 | ||
4959 | static void intel_dp_encoder_reset(struct drm_encoder *encoder) | 4935 | void intel_dp_encoder_reset(struct drm_encoder *encoder) |
4960 | { | 4936 | { |
4961 | struct intel_dp *intel_dp; | 4937 | struct intel_dp *intel_dp; |
4962 | 4938 | ||
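In intel_dp.c the gen5 eDP underrun workaround collapses into a single vblank wait inside ironlake_edp_pll_on(), and eDP detection stops depending on DPCD sink_count, which eDP panels are not required to report. A compact sketch of the detection flow after these hunks (example_detect_dpcd is illustrative and folds the sink_count check from intel_dp_get_dpcd() into one place):

	static enum drm_connector_status
	example_detect_dpcd(struct intel_dp *intel_dp)
	{
		if (!intel_dp_get_dpcd(intel_dp))
			return connector_status_disconnected;

		/* eDP is always connected once the DPCD read succeeds */
		if (is_edp(intel_dp))
			return connector_status_connected;

		/* external DP: a zero sink count means nothing is attached */
		if (!intel_dp->sink_count)
			return connector_status_disconnected;

		/* ...downstream-port checks for branch devices... */
		return connector_status_connected;
	}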
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 0bde6a4259fd..639bf0209c15 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -1295,17 +1295,9 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1295 | uint32_t temp; | 1295 | uint32_t temp; |
1296 | enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ | 1296 | enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ |
1297 | 1297 | ||
1298 | temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); | ||
1299 | /* | ||
1300 | * Definition of each bit polarity has been changed | ||
1301 | * after A1 stepping | ||
1302 | */ | ||
1303 | if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) | ||
1304 | temp &= ~PORT_PLL_REF_SEL; | ||
1305 | else | ||
1306 | temp |= PORT_PLL_REF_SEL; | ||
1307 | |||
1308 | /* Non-SSC reference */ | 1298 | /* Non-SSC reference */ |
1299 | temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); | ||
1300 | temp |= PORT_PLL_REF_SEL; | ||
1309 | I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); | 1301 | I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); |
1310 | 1302 | ||
1311 | /* Disable 10 bit clock */ | 1303 | /* Disable 10 bit clock */ |
@@ -1652,10 +1644,7 @@ static void intel_ddi_pll_init(struct drm_device *dev) | |||
1652 | DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n"); | 1644 | DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n"); |
1653 | if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) | 1645 | if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) |
1654 | DRM_ERROR("LCPLL1 is disabled\n"); | 1646 | DRM_ERROR("LCPLL1 is disabled\n"); |
1655 | } else if (IS_BROXTON(dev)) { | 1647 | } else if (!IS_BROXTON(dev_priv)) { |
1656 | broxton_init_cdclk(dev); | ||
1657 | broxton_ddi_phy_init(dev); | ||
1658 | } else { | ||
1659 | /* | 1648 | /* |
1660 | * The LCPLL register should be turned on by the BIOS. For now | 1649 | * The LCPLL register should be turned on by the BIOS. For now |
1661 | * let's just check its state and print errors in case | 1650 | * let's just check its state and print errors in case |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index e0fcfa1683cc..b9f1304439e2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -497,6 +497,11 @@ struct intel_crtc_state { | |||
497 | /* Actual register state of the dpll, for shared dpll cross-checking. */ | 497 | /* Actual register state of the dpll, for shared dpll cross-checking. */ |
498 | struct intel_dpll_hw_state dpll_hw_state; | 498 | struct intel_dpll_hw_state dpll_hw_state; |
499 | 499 | ||
500 | /* DSI PLL registers */ | ||
501 | struct { | ||
502 | u32 ctrl, div; | ||
503 | } dsi_pll; | ||
504 | |||
500 | int pipe_bpp; | 505 | int pipe_bpp; |
501 | struct intel_link_m_n dp_m_n; | 506 | struct intel_link_m_n dp_m_n; |
502 | 507 | ||
@@ -1224,12 +1229,16 @@ void intel_prepare_reset(struct drm_device *dev); | |||
1224 | void intel_finish_reset(struct drm_device *dev); | 1229 | void intel_finish_reset(struct drm_device *dev); |
1225 | void hsw_enable_pc8(struct drm_i915_private *dev_priv); | 1230 | void hsw_enable_pc8(struct drm_i915_private *dev_priv); |
1226 | void hsw_disable_pc8(struct drm_i915_private *dev_priv); | 1231 | void hsw_disable_pc8(struct drm_i915_private *dev_priv); |
1227 | void broxton_init_cdclk(struct drm_device *dev); | 1232 | void broxton_init_cdclk(struct drm_i915_private *dev_priv); |
1228 | void broxton_uninit_cdclk(struct drm_device *dev); | 1233 | void broxton_uninit_cdclk(struct drm_i915_private *dev_priv); |
1229 | void broxton_ddi_phy_init(struct drm_device *dev); | 1234 | bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv); |
1230 | void broxton_ddi_phy_uninit(struct drm_device *dev); | 1235 | void broxton_ddi_phy_init(struct drm_i915_private *dev_priv); |
1236 | void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv); | ||
1237 | void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv); | ||
1238 | void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); | ||
1231 | void bxt_enable_dc9(struct drm_i915_private *dev_priv); | 1239 | void bxt_enable_dc9(struct drm_i915_private *dev_priv); |
1232 | void bxt_disable_dc9(struct drm_i915_private *dev_priv); | 1240 | void bxt_disable_dc9(struct drm_i915_private *dev_priv); |
1241 | void gen9_enable_dc5(struct drm_i915_private *dev_priv); | ||
1233 | void skl_init_cdclk(struct drm_i915_private *dev_priv); | 1242 | void skl_init_cdclk(struct drm_i915_private *dev_priv); |
1234 | int skl_sanitize_cdclk(struct drm_i915_private *dev_priv); | 1243 | int skl_sanitize_cdclk(struct drm_i915_private *dev_priv); |
1235 | void skl_uninit_cdclk(struct drm_i915_private *dev_priv); | 1244 | void skl_uninit_cdclk(struct drm_i915_private *dev_priv); |
@@ -1268,6 +1277,8 @@ u32 skl_plane_ctl_rotation(unsigned int rotation); | |||
1268 | void intel_csr_ucode_init(struct drm_i915_private *); | 1277 | void intel_csr_ucode_init(struct drm_i915_private *); |
1269 | void intel_csr_load_program(struct drm_i915_private *); | 1278 | void intel_csr_load_program(struct drm_i915_private *); |
1270 | void intel_csr_ucode_fini(struct drm_i915_private *); | 1279 | void intel_csr_ucode_fini(struct drm_i915_private *); |
1280 | void intel_csr_ucode_suspend(struct drm_i915_private *); | ||
1281 | void intel_csr_ucode_resume(struct drm_i915_private *); | ||
1271 | 1282 | ||
1272 | /* intel_dp.c */ | 1283 | /* intel_dp.c */ |
1273 | void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); | 1284 | void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); |
@@ -1278,6 +1289,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, | |||
1278 | void intel_dp_start_link_train(struct intel_dp *intel_dp); | 1289 | void intel_dp_start_link_train(struct intel_dp *intel_dp); |
1279 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); | 1290 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); |
1280 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); | 1291 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); |
1292 | void intel_dp_encoder_reset(struct drm_encoder *encoder); | ||
1293 | void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); | ||
1281 | void intel_dp_encoder_destroy(struct drm_encoder *encoder); | 1294 | void intel_dp_encoder_destroy(struct drm_encoder *encoder); |
1282 | int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); | 1295 | int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); |
1283 | bool intel_dp_compute_config(struct intel_encoder *encoder, | 1296 | bool intel_dp_compute_config(struct intel_encoder *encoder, |
@@ -1462,8 +1475,8 @@ int intel_power_domains_init(struct drm_i915_private *); | |||
1462 | void intel_power_domains_fini(struct drm_i915_private *); | 1475 | void intel_power_domains_fini(struct drm_i915_private *); |
1463 | void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); | 1476 | void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); |
1464 | void intel_power_domains_suspend(struct drm_i915_private *dev_priv); | 1477 | void intel_power_domains_suspend(struct drm_i915_private *dev_priv); |
1465 | void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv); | 1478 | void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); |
1466 | void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv); | 1479 | void bxt_display_core_uninit(struct drm_i915_private *dev_priv); |
1467 | void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); | 1480 | void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); |
1468 | const char * | 1481 | const char * |
1469 | intel_display_power_domain_str(enum intel_display_power_domain domain); | 1482 | intel_display_power_domain_str(enum intel_display_power_domain domain); |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index a1e0547484ae..2b22bb9bb86f 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -290,16 +290,26 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, | |||
290 | struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, | 290 | struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, |
291 | base); | 291 | base); |
292 | struct intel_connector *intel_connector = intel_dsi->attached_connector; | 292 | struct intel_connector *intel_connector = intel_dsi->attached_connector; |
293 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 293 | struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); |
294 | const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | ||
294 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 295 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
296 | int ret; | ||
295 | 297 | ||
296 | DRM_DEBUG_KMS("\n"); | 298 | DRM_DEBUG_KMS("\n"); |
297 | 299 | ||
298 | pipe_config->has_dsi_encoder = true; | 300 | pipe_config->has_dsi_encoder = true; |
299 | 301 | ||
300 | if (fixed_mode) | 302 | if (fixed_mode) { |
301 | intel_fixed_panel_mode(fixed_mode, adjusted_mode); | 303 | intel_fixed_panel_mode(fixed_mode, adjusted_mode); |
302 | 304 | ||
305 | if (HAS_GMCH_DISPLAY(dev_priv)) | ||
306 | intel_gmch_panel_fitting(crtc, pipe_config, | ||
307 | intel_connector->panel.fitting_mode); | ||
308 | else | ||
309 | intel_pch_panel_fitting(crtc, pipe_config, | ||
310 | intel_connector->panel.fitting_mode); | ||
311 | } | ||
312 | |||
303 | /* DSI uses short packets for sync events, so clear mode flags for DSI */ | 313 | /* DSI uses short packets for sync events, so clear mode flags for DSI */ |
304 | adjusted_mode->flags = 0; | 314 | adjusted_mode->flags = 0; |
305 | 315 | ||
@@ -311,6 +321,12 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, | |||
311 | pipe_config->cpu_transcoder = TRANSCODER_DSI_A; | 321 | pipe_config->cpu_transcoder = TRANSCODER_DSI_A; |
312 | } | 322 | } |
313 | 323 | ||
324 | ret = intel_compute_dsi_pll(encoder, pipe_config); | ||
325 | if (ret) | ||
326 | return false; | ||
327 | |||
328 | pipe_config->clock_set = true; | ||
329 | |||
314 | return true; | 330 | return true; |
315 | } | 331 | } |
316 | 332 | ||
@@ -498,14 +514,19 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) | |||
498 | struct drm_device *dev = encoder->base.dev; | 514 | struct drm_device *dev = encoder->base.dev; |
499 | struct drm_i915_private *dev_priv = dev->dev_private; | 515 | struct drm_i915_private *dev_priv = dev->dev_private; |
500 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 516 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
501 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 517 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); |
502 | enum pipe pipe = intel_crtc->pipe; | ||
503 | enum port port; | 518 | enum port port; |
504 | u32 tmp; | 519 | u32 tmp; |
505 | 520 | ||
506 | DRM_DEBUG_KMS("\n"); | 521 | DRM_DEBUG_KMS("\n"); |
507 | 522 | ||
508 | intel_enable_dsi_pll(encoder); | 523 | /* |
524 | * The BIOS may leave the PLL in a wonky state where it doesn't | ||
525 | * lock. It needs to be fully powered down to fix it. | ||
526 | */ | ||
527 | intel_disable_dsi_pll(encoder); | ||
528 | intel_enable_dsi_pll(encoder, crtc->config); | ||
529 | |||
509 | intel_dsi_prepare(encoder); | 530 | intel_dsi_prepare(encoder); |
510 | 531 | ||
511 | /* Panel Enable over CRC PMIC */ | 532 | /* Panel Enable over CRC PMIC */ |
@@ -515,19 +536,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) | |||
515 | msleep(intel_dsi->panel_on_delay); | 536 | msleep(intel_dsi->panel_on_delay); |
516 | 537 | ||
517 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { | 538 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
518 | /* | 539 | /* Disable DPOunit clock gating, can stall pipe */ |
519 | * Disable DPOunit clock gating, can stall pipe | ||
520 | * and we need DPLL REFA always enabled | ||
521 | */ | ||
522 | tmp = I915_READ(DPLL(pipe)); | ||
523 | tmp |= DPLL_REF_CLK_ENABLE_VLV; | ||
524 | I915_WRITE(DPLL(pipe), tmp); | ||
525 | |||
526 | /* update the hw state for DPLL */ | ||
527 | intel_crtc->config->dpll_hw_state.dpll = | ||
528 | DPLL_INTEGRATED_REF_CLK_VLV | | ||
529 | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; | ||
530 | |||
531 | tmp = I915_READ(DSPCLK_GATE_D); | 540 | tmp = I915_READ(DSPCLK_GATE_D); |
532 | tmp |= DPOUNIT_CLOCK_GATE_DISABLE; | 541 | tmp |= DPOUNIT_CLOCK_GATE_DISABLE; |
533 | I915_WRITE(DSPCLK_GATE_D, tmp); | 542 | I915_WRITE(DSPCLK_GATE_D, tmp); |
@@ -679,11 +688,16 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder) | |||
679 | drm_panel_unprepare(intel_dsi->panel); | 688 | drm_panel_unprepare(intel_dsi->panel); |
680 | 689 | ||
681 | msleep(intel_dsi->panel_off_delay); | 690 | msleep(intel_dsi->panel_off_delay); |
682 | msleep(intel_dsi->panel_pwr_cycle_delay); | ||
683 | 691 | ||
684 | /* Panel Disable over CRC PMIC */ | 692 | /* Panel Disable over CRC PMIC */ |
685 | if (intel_dsi->gpio_panel) | 693 | if (intel_dsi->gpio_panel) |
686 | gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0); | 694 | gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0); |
695 | |||
696 | /* | ||
697 | * FIXME As we do with eDP, just make a note of the time here | ||
698 | * and perform the wait before the next panel power on. | ||
699 | */ | ||
700 | msleep(intel_dsi->panel_pwr_cycle_delay); | ||
687 | } | 701 | } |
688 | 702 | ||
689 | static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | 703 | static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, |
@@ -716,11 +730,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | |||
716 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); | 730 | BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); |
717 | bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE; | 731 | bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE; |
718 | 732 | ||
719 | /* Due to some hardware limitations on BYT, MIPI Port C DPI | 733 | /* |
720 | * Enable bit does not get set. To check whether DSI Port C | 734 | * Due to some hardware limitations on VLV/CHV, the DPI enable |
721 | * was enabled in BIOS, check the Pipe B enable bit | 735 | * bit in port C control register does not get set. As a |
736 | * workaround, check pipe B conf instead. | ||
722 | */ | 737 | */ |
723 | if (IS_VALLEYVIEW(dev) && port == PORT_C) | 738 | if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C) |
724 | enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; | 739 | enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; |
725 | 740 | ||
726 | /* Try command mode if video mode not enabled */ | 741 | /* Try command mode if video mode not enabled */ |
@@ -826,13 +841,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, | |||
826 | if (IS_BROXTON(dev)) | 841 | if (IS_BROXTON(dev)) |
827 | bxt_dsi_get_pipe_config(encoder, pipe_config); | 842 | bxt_dsi_get_pipe_config(encoder, pipe_config); |
828 | 843 | ||
829 | /* | 844 | pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp, |
830 | * DPLL_MD is not used in case of DSI, reading will get some default value | 845 | pipe_config); |
831 | * set dpll_md = 0 | ||
832 | */ | ||
833 | pipe_config->dpll_hw_state.dpll_md = 0; | ||
834 | |||
835 | pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp); | ||
836 | if (!pclk) | 846 | if (!pclk) |
837 | return; | 847 | return; |
838 | 848 | ||
@@ -845,7 +855,7 @@ intel_dsi_mode_valid(struct drm_connector *connector, | |||
845 | struct drm_display_mode *mode) | 855 | struct drm_display_mode *mode) |
846 | { | 856 | { |
847 | struct intel_connector *intel_connector = to_intel_connector(connector); | 857 | struct intel_connector *intel_connector = to_intel_connector(connector); |
848 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 858 | const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
849 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | 859 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; |
850 | 860 | ||
851 | DRM_DEBUG_KMS("\n"); | 861 | DRM_DEBUG_KMS("\n"); |
@@ -1183,6 +1193,48 @@ static int intel_dsi_get_modes(struct drm_connector *connector) | |||
1183 | return 1; | 1193 | return 1; |
1184 | } | 1194 | } |
1185 | 1195 | ||
1196 | static int intel_dsi_set_property(struct drm_connector *connector, | ||
1197 | struct drm_property *property, | ||
1198 | uint64_t val) | ||
1199 | { | ||
1200 | struct drm_device *dev = connector->dev; | ||
1201 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
1202 | struct drm_crtc *crtc; | ||
1203 | int ret; | ||
1204 | |||
1205 | ret = drm_object_property_set_value(&connector->base, property, val); | ||
1206 | if (ret) | ||
1207 | return ret; | ||
1208 | |||
1209 | if (property == dev->mode_config.scaling_mode_property) { | ||
1210 | if (val == DRM_MODE_SCALE_NONE) { | ||
1211 | DRM_DEBUG_KMS("no scaling not supported\n"); | ||
1212 | return -EINVAL; | ||
1213 | } | ||
1214 | if (HAS_GMCH_DISPLAY(dev) && | ||
1215 | val == DRM_MODE_SCALE_CENTER) { | ||
1216 | DRM_DEBUG_KMS("centering not supported\n"); | ||
1217 | return -EINVAL; | ||
1218 | } | ||
1219 | |||
1220 | if (intel_connector->panel.fitting_mode == val) | ||
1221 | return 0; | ||
1222 | |||
1223 | intel_connector->panel.fitting_mode = val; | ||
1224 | } | ||
1225 | |||
1226 | crtc = intel_attached_encoder(connector)->base.crtc; | ||
1227 | if (crtc && crtc->state->enable) { | ||
1228 | /* | ||
1229 | * If the CRTC is enabled, the display will be changed | ||
1230 | * according to the new panel fitting mode. | ||
1231 | */ | ||
1232 | intel_crtc_restore_mode(crtc); | ||
1233 | } | ||
1234 | |||
1235 | return 0; | ||
1236 | } | ||
1237 | |||
1186 | static void intel_dsi_connector_destroy(struct drm_connector *connector) | 1238 | static void intel_dsi_connector_destroy(struct drm_connector *connector) |
1187 | { | 1239 | { |
1188 | struct intel_connector *intel_connector = to_intel_connector(connector); | 1240 | struct intel_connector *intel_connector = to_intel_connector(connector); |
@@ -1225,11 +1277,25 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { | |||
1225 | .detect = intel_dsi_detect, | 1277 | .detect = intel_dsi_detect, |
1226 | .destroy = intel_dsi_connector_destroy, | 1278 | .destroy = intel_dsi_connector_destroy, |
1227 | .fill_modes = drm_helper_probe_single_connector_modes, | 1279 | .fill_modes = drm_helper_probe_single_connector_modes, |
1280 | .set_property = intel_dsi_set_property, | ||
1228 | .atomic_get_property = intel_connector_atomic_get_property, | 1281 | .atomic_get_property = intel_connector_atomic_get_property, |
1229 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 1282 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
1230 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | 1283 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, |
1231 | }; | 1284 | }; |
1232 | 1285 | ||
1286 | static void intel_dsi_add_properties(struct intel_connector *connector) | ||
1287 | { | ||
1288 | struct drm_device *dev = connector->base.dev; | ||
1289 | |||
1290 | if (connector->panel.fixed_mode) { | ||
1291 | drm_mode_create_scaling_mode_property(dev); | ||
1292 | drm_object_attach_property(&connector->base.base, | ||
1293 | dev->mode_config.scaling_mode_property, | ||
1294 | DRM_MODE_SCALE_ASPECT); | ||
1295 | connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; | ||
1296 | } | ||
1297 | } | ||
1298 | |||
1233 | void intel_dsi_init(struct drm_device *dev) | 1299 | void intel_dsi_init(struct drm_device *dev) |
1234 | { | 1300 | { |
1235 | struct intel_dsi *intel_dsi; | 1301 | struct intel_dsi *intel_dsi; |
@@ -1353,8 +1419,6 @@ void intel_dsi_init(struct drm_device *dev) | |||
1353 | 1419 | ||
1354 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 1420 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
1355 | 1421 | ||
1356 | drm_connector_register(connector); | ||
1357 | |||
1358 | drm_panel_attach(intel_dsi->panel, connector); | 1422 | drm_panel_attach(intel_dsi->panel, connector); |
1359 | 1423 | ||
1360 | mutex_lock(&dev->mode_config.mutex); | 1424 | mutex_lock(&dev->mode_config.mutex); |
@@ -1373,6 +1437,11 @@ void intel_dsi_init(struct drm_device *dev) | |||
1373 | } | 1437 | } |
1374 | 1438 | ||
1375 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); | 1439 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); |
1440 | |||
1441 | intel_dsi_add_properties(intel_connector); | ||
1442 | |||
1443 | drm_connector_register(connector); | ||
1444 | |||
1376 | intel_panel_setup_backlight(connector, INVALID_PIPE); | 1445 | intel_panel_setup_backlight(connector, INVALID_PIPE); |
1377 | 1446 | ||
1378 | return; | 1447 | return; |
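The intel_dsi.c hunks wire DSI panels into the same scaling_mode property handling as eDP and move the DSI PLL setup to a precompute-then-program split: intel_compute_dsi_pll() fills crtc_state->dsi_pll at atomic-check time, and the pre-enable path fully powers the PLL down before enabling it with that precomputed state, since the BIOS can leave it unable to lock. A hedged sketch of the split (the example_* wrappers are illustrative; the intel_*_dsi_pll() calls match the new prototypes in intel_dsi.h below):

	static bool example_dsi_compute_config(struct intel_encoder *encoder,
					       struct intel_crtc_state *config)
	{
		/* compute ctrl/div into config->dsi_pll at atomic check time */
		if (intel_compute_dsi_pll(encoder, config))
			return false;

		config->clock_set = true;
		return true;
	}

	static void example_dsi_pre_enable(struct intel_encoder *encoder,
					   const struct intel_crtc_state *config)
	{
		/* the BIOS may leave the PLL unable to lock; power it down first */
		intel_disable_dsi_pll(encoder);
		intel_enable_dsi_pll(encoder, config);
	}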
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index dabde19ee8aa..61a6957fc6c2 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h | |||
@@ -127,11 +127,15 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) | |||
127 | } | 127 | } |
128 | 128 | ||
129 | bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); | 129 | bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); |
130 | extern void intel_enable_dsi_pll(struct intel_encoder *encoder); | 130 | int intel_compute_dsi_pll(struct intel_encoder *encoder, |
131 | extern void intel_disable_dsi_pll(struct intel_encoder *encoder); | 131 | struct intel_crtc_state *config); |
132 | extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp); | 132 | void intel_enable_dsi_pll(struct intel_encoder *encoder, |
133 | extern void intel_dsi_reset_clocks(struct intel_encoder *encoder, | 133 | const struct intel_crtc_state *config); |
134 | enum port port); | 134 | void intel_disable_dsi_pll(struct intel_encoder *encoder); |
135 | u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, | ||
136 | struct intel_crtc_state *config); | ||
137 | void intel_dsi_reset_clocks(struct intel_encoder *encoder, | ||
138 | enum port port); | ||
135 | 139 | ||
136 | struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id); | 140 | struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id); |
137 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); | 141 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); |
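
Editor's note: the header changes split DSI PLL handling into a compute step that fills intel_crtc_state and an enable step that only consumes the precomputed state. A hedged sketch of the intended caller flow; the prototypes are taken from the hunk above, the surrounding caller is assumed:

	/* Compute phase: pure calculation, may fail without touching hardware. */
	ret = intel_compute_dsi_pll(encoder, config);
	if (ret)
		return ret;	/* e.g. -ECHRNG or -ENODEV, see intel_dsi_pll.c below */

	/* Commit phase: write the dividers already stored in config->dsi_pll. */
	intel_enable_dsi_pll(encoder, config);
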
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index 4e53fcf6e087..1765e6e18f2c 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c | |||
@@ -30,12 +30,7 @@ | |||
30 | #include "i915_drv.h" | 30 | #include "i915_drv.h" |
31 | #include "intel_dsi.h" | 31 | #include "intel_dsi.h" |
32 | 32 | ||
33 | struct dsi_mnp { | 33 | static const u16 lfsr_converts[] = { |
34 | u32 dsi_pll_ctrl; | ||
35 | u32 dsi_pll_div; | ||
36 | }; | ||
37 | |||
38 | static const u32 lfsr_converts[] = { | ||
39 | 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */ | 34 | 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */ |
40 | 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ | 35 | 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ |
41 | 106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */ | 36 | 106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */ |
@@ -57,7 +52,8 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt, | |||
57 | } | 52 | } |
58 | 53 | ||
59 | static int dsi_calc_mnp(struct drm_i915_private *dev_priv, | 54 | static int dsi_calc_mnp(struct drm_i915_private *dev_priv, |
60 | struct dsi_mnp *dsi_mnp, int target_dsi_clk) | 55 | struct intel_crtc_state *config, |
56 | int target_dsi_clk) | ||
61 | { | 57 | { |
62 | unsigned int calc_m = 0, calc_p = 0; | 58 | unsigned int calc_m = 0, calc_p = 0; |
63 | unsigned int m_min, m_max, p_min = 2, p_max = 6; | 59 | unsigned int m_min, m_max, p_min = 2, p_max = 6; |
@@ -103,8 +99,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, | |||
103 | /* register has log2(N1), this works fine for powers of two */ | 99 | /* register has log2(N1), this works fine for powers of two */ |
104 | n = ffs(n) - 1; | 100 | n = ffs(n) - 1; |
105 | m_seed = lfsr_converts[calc_m - 62]; | 101 | m_seed = lfsr_converts[calc_m - 62]; |
106 | dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2); | 102 | config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2); |
107 | dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT | | 103 | config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT | |
108 | m_seed << DSI_PLL_M1_DIV_SHIFT; | 104 | m_seed << DSI_PLL_M1_DIV_SHIFT; |
109 | 105 | ||
110 | return 0; | 106 | return 0; |
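
Editor's note: in the hunk above the calculator now stores its result directly in config->dsi_pll. The N divider is encoded as log2(N1) via ffs(), and the M divider is translated through the LFSR conversion table. A standalone illustration of that encoding, using only the table rows visible in this diff (the real table in intel_dsi_pll.c continues past M = 90); the register shift positions are deliberately omitted:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static const unsigned short lfsr_converts[] = {
		426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
		461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
		106, 53, 282, 397, 454, 227, 113, 56, 284, 142,		/* 81 - 90 */
	};

	int main(void)
	{
		unsigned int calc_m = 71, n = 4;	/* example divider choice */
		unsigned int m_seed = lfsr_converts[calc_m - 62];
		unsigned int n_field = ffs(n) - 1;	/* register stores log2(N1) */

		printf("M=%u -> seed %u, N=%u -> field %u\n",
		       calc_m, m_seed, n, n_field);
		return 0;
	}
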
@@ -114,54 +110,55 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, | |||
114 | * XXX: The muxing and gating is hard coded for now. Need to add support for | 110 | * XXX: The muxing and gating is hard coded for now. Need to add support for |
115 | * sharing PLLs with two DSI outputs. | 111 | * sharing PLLs with two DSI outputs. |
116 | */ | 112 | */ |
117 | static void vlv_configure_dsi_pll(struct intel_encoder *encoder) | 113 | static int vlv_compute_dsi_pll(struct intel_encoder *encoder, |
114 | struct intel_crtc_state *config) | ||
118 | { | 115 | { |
119 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 116 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
120 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 117 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
121 | int ret; | 118 | int ret; |
122 | struct dsi_mnp dsi_mnp; | ||
123 | u32 dsi_clk; | 119 | u32 dsi_clk; |
124 | 120 | ||
125 | dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, | 121 | dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, |
126 | intel_dsi->lane_count); | 122 | intel_dsi->lane_count); |
127 | 123 | ||
128 | ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk); | 124 | ret = dsi_calc_mnp(dev_priv, config, dsi_clk); |
129 | if (ret) { | 125 | if (ret) { |
130 | DRM_DEBUG_KMS("dsi_calc_mnp failed\n"); | 126 | DRM_DEBUG_KMS("dsi_calc_mnp failed\n"); |
131 | return; | 127 | return ret; |
132 | } | 128 | } |
133 | 129 | ||
134 | if (intel_dsi->ports & (1 << PORT_A)) | 130 | if (intel_dsi->ports & (1 << PORT_A)) |
135 | dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL; | 131 | config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL; |
136 | 132 | ||
137 | if (intel_dsi->ports & (1 << PORT_C)) | 133 | if (intel_dsi->ports & (1 << PORT_C)) |
138 | dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL; | 134 | config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL; |
135 | |||
136 | config->dsi_pll.ctrl |= DSI_PLL_VCO_EN; | ||
139 | 137 | ||
140 | DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n", | 138 | DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n", |
141 | dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl); | 139 | config->dsi_pll.div, config->dsi_pll.ctrl); |
142 | 140 | ||
143 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0); | 141 | return 0; |
144 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div); | ||
145 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl); | ||
146 | } | 142 | } |
147 | 143 | ||
148 | static void vlv_enable_dsi_pll(struct intel_encoder *encoder) | 144 | static void vlv_enable_dsi_pll(struct intel_encoder *encoder, |
145 | const struct intel_crtc_state *config) | ||
149 | { | 146 | { |
150 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 147 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
151 | u32 tmp; | ||
152 | 148 | ||
153 | DRM_DEBUG_KMS("\n"); | 149 | DRM_DEBUG_KMS("\n"); |
154 | 150 | ||
155 | mutex_lock(&dev_priv->sb_lock); | 151 | mutex_lock(&dev_priv->sb_lock); |
156 | 152 | ||
157 | vlv_configure_dsi_pll(encoder); | 153 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0); |
154 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div); | ||
155 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, | ||
156 | config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN); | ||
158 | 157 | ||
159 | /* wait at least 0.5 us after ungating before enabling VCO */ | 158 | /* wait at least 0.5 us after ungating before enabling VCO */ |
160 | usleep_range(1, 10); | 159 | usleep_range(1, 10); |
161 | 160 | ||
162 | tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); | 161 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl); |
163 | tmp |= DSI_PLL_VCO_EN; | ||
164 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp); | ||
165 | 162 | ||
166 | if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) & | 163 | if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) & |
167 | DSI_PLL_LOCK, 20)) { | 164 | DSI_PLL_LOCK, 20)) { |
@@ -177,7 +174,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder) | |||
177 | 174 | ||
178 | static void vlv_disable_dsi_pll(struct intel_encoder *encoder) | 175 | static void vlv_disable_dsi_pll(struct intel_encoder *encoder) |
179 | { | 176 | { |
180 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 177 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
181 | u32 tmp; | 178 | u32 tmp; |
182 | 179 | ||
183 | DRM_DEBUG_KMS("\n"); | 180 | DRM_DEBUG_KMS("\n"); |
@@ -224,7 +221,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) | |||
224 | 221 | ||
225 | static void bxt_disable_dsi_pll(struct intel_encoder *encoder) | 222 | static void bxt_disable_dsi_pll(struct intel_encoder *encoder) |
226 | { | 223 | { |
227 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 224 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
228 | u32 val; | 225 | u32 val; |
229 | 226 | ||
230 | DRM_DEBUG_KMS("\n"); | 227 | DRM_DEBUG_KMS("\n"); |
@@ -251,14 +248,15 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp) | |||
251 | bpp, pipe_bpp); | 248 | bpp, pipe_bpp); |
252 | } | 249 | } |
253 | 250 | ||
254 | static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) | 251 | static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, |
252 | struct intel_crtc_state *config) | ||
255 | { | 253 | { |
256 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 254 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
257 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 255 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
258 | u32 dsi_clock, pclk; | 256 | u32 dsi_clock, pclk; |
259 | u32 pll_ctl, pll_div; | 257 | u32 pll_ctl, pll_div; |
260 | u32 m = 0, p = 0, n; | 258 | u32 m = 0, p = 0, n; |
261 | int refclk = 25000; | 259 | int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000; |
262 | int i; | 260 | int i; |
263 | 261 | ||
264 | DRM_DEBUG_KMS("\n"); | 262 | DRM_DEBUG_KMS("\n"); |
@@ -268,6 +266,9 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) | |||
268 | pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER); | 266 | pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER); |
269 | mutex_unlock(&dev_priv->sb_lock); | 267 | mutex_unlock(&dev_priv->sb_lock); |
270 | 268 | ||
269 | config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK; | ||
270 | config->dsi_pll.div = pll_div; | ||
271 | |||
271 | /* mask out other bits and extract the P1 divisor */ | 272 | /* mask out other bits and extract the P1 divisor */ |
272 | pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; | 273 | pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; |
273 | pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); | 274 | pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); |
@@ -313,7 +314,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) | |||
313 | return pclk; | 314 | return pclk; |
314 | } | 315 | } |
315 | 316 | ||
316 | static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) | 317 | static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, |
318 | struct intel_crtc_state *config) | ||
317 | { | 319 | { |
318 | u32 pclk; | 320 | u32 pclk; |
319 | u32 dsi_clk; | 321 | u32 dsi_clk; |
@@ -327,15 +329,9 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) | |||
327 | return 0; | 329 | return 0; |
328 | } | 330 | } |
329 | 331 | ||
330 | dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) & | 332 | config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL); |
331 | BXT_DSI_PLL_RATIO_MASK; | ||
332 | 333 | ||
333 | /* Invalid DSI ratio ? */ | 334 | dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; |
334 | if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN || | ||
335 | dsi_ratio > BXT_DSI_PLL_RATIO_MAX) { | ||
336 | DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio); | ||
337 | return 0; | ||
338 | } | ||
339 | 335 | ||
340 | dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; | 336 | dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; |
341 | 337 | ||
@@ -348,12 +344,13 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) | |||
348 | return pclk; | 344 | return pclk; |
349 | } | 345 | } |
350 | 346 | ||
351 | u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) | 347 | u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, |
348 | struct intel_crtc_state *config) | ||
352 | { | 349 | { |
353 | if (IS_BROXTON(encoder->base.dev)) | 350 | if (IS_BROXTON(encoder->base.dev)) |
354 | return bxt_dsi_get_pclk(encoder, pipe_bpp); | 351 | return bxt_dsi_get_pclk(encoder, pipe_bpp, config); |
355 | else | 352 | else |
356 | return vlv_dsi_get_pclk(encoder, pipe_bpp); | 353 | return vlv_dsi_get_pclk(encoder, pipe_bpp, config); |
357 | } | 354 | } |
358 | 355 | ||
359 | static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) | 356 | static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) |
@@ -370,7 +367,8 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) | |||
370 | } | 367 | } |
371 | 368 | ||
372 | /* Program BXT Mipi clocks and dividers */ | 369 | /* Program BXT Mipi clocks and dividers */ |
373 | static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port) | 370 | static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, |
371 | const struct intel_crtc_state *config) | ||
374 | { | 372 | { |
375 | struct drm_i915_private *dev_priv = dev->dev_private; | 373 | struct drm_i915_private *dev_priv = dev->dev_private; |
376 | u32 tmp; | 374 | u32 tmp; |
@@ -390,8 +388,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port) | |||
390 | tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)); | 388 | tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)); |
391 | 389 | ||
392 | /* Get the current DSI rate(actual) */ | 390 | /* Get the current DSI rate(actual) */ |
393 | pll_ratio = I915_READ(BXT_DSI_PLL_CTL) & | 391 | pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; |
394 | BXT_DSI_PLL_RATIO_MASK; | ||
395 | dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2; | 392 | dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2; |
396 | 393 | ||
397 | /* | 394 | /* |
@@ -427,16 +424,15 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port) | |||
427 | I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); | 424 | I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); |
428 | } | 425 | } |
429 | 426 | ||
430 | static bool bxt_configure_dsi_pll(struct intel_encoder *encoder) | 427 | static int bxt_compute_dsi_pll(struct intel_encoder *encoder, |
428 | struct intel_crtc_state *config) | ||
431 | { | 429 | { |
432 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
433 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 430 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
434 | u8 dsi_ratio; | 431 | u8 dsi_ratio; |
435 | u32 dsi_clk; | 432 | u32 dsi_clk; |
436 | u32 val; | ||
437 | 433 | ||
438 | dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, | 434 | dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, |
439 | intel_dsi->lane_count); | 435 | intel_dsi->lane_count); |
440 | 436 | ||
441 | /* | 437 | /* |
442 | * From clock diagram, to get PLL ratio divider, divide double of DSI | 438 | * From clock diagram, to get PLL ratio divider, divide double of DSI |
@@ -445,9 +441,9 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder) | |||
445 | */ | 441 | */ |
446 | dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ); | 442 | dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ); |
447 | if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN || | 443 | if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN || |
448 | dsi_ratio > BXT_DSI_PLL_RATIO_MAX) { | 444 | dsi_ratio > BXT_DSI_PLL_RATIO_MAX) { |
449 | DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n"); | 445 | DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n"); |
450 | return false; | 446 | return -ECHRNG; |
451 | } | 447 | } |
452 | 448 | ||
453 | /* | 449 | /* |
@@ -455,27 +451,19 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder) | |||
455 | * Spec says both have to be programmed, even if one is not getting | 451 | * Spec says both have to be programmed, even if one is not getting |
456 | * used. Configure MIPI_CLOCK_CTL dividers in modeset | 452 | * used. Configure MIPI_CLOCK_CTL dividers in modeset |
457 | */ | 453 | */ |
458 | val = I915_READ(BXT_DSI_PLL_CTL); | 454 | config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2; |
459 | val &= ~BXT_DSI_PLL_PVD_RATIO_MASK; | ||
460 | val &= ~BXT_DSI_FREQ_SEL_MASK; | ||
461 | val &= ~BXT_DSI_PLL_RATIO_MASK; | ||
462 | val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2); | ||
463 | 455 | ||
464 | /* As per recommendation from hardware team, | 456 | /* As per recommendation from hardware team, |
465 | * Prog PVD ratio =1 if dsi ratio <= 50 | 457 | * Prog PVD ratio =1 if dsi ratio <= 50 |
466 | */ | 458 | */ |
467 | if (dsi_ratio <= 50) { | 459 | if (dsi_ratio <= 50) |
468 | val &= ~BXT_DSI_PLL_PVD_RATIO_MASK; | 460 | config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1; |
469 | val |= BXT_DSI_PLL_PVD_RATIO_1; | ||
470 | } | ||
471 | 461 | ||
472 | I915_WRITE(BXT_DSI_PLL_CTL, val); | 462 | return 0; |
473 | POSTING_READ(BXT_DSI_PLL_CTL); | ||
474 | |||
475 | return true; | ||
476 | } | 463 | } |
477 | 464 | ||
478 | static void bxt_enable_dsi_pll(struct intel_encoder *encoder) | 465 | static void bxt_enable_dsi_pll(struct intel_encoder *encoder, |
466 | const struct intel_crtc_state *config) | ||
479 | { | 467 | { |
480 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 468 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
481 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 469 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
@@ -484,23 +472,13 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder) | |||
484 | 472 | ||
485 | DRM_DEBUG_KMS("\n"); | 473 | DRM_DEBUG_KMS("\n"); |
486 | 474 | ||
487 | val = I915_READ(BXT_DSI_PLL_ENABLE); | ||
488 | |||
489 | if (val & BXT_DSI_PLL_DO_ENABLE) { | ||
490 | WARN(1, "DSI PLL already enabled. Disabling it.\n"); | ||
491 | val &= ~BXT_DSI_PLL_DO_ENABLE; | ||
492 | I915_WRITE(BXT_DSI_PLL_ENABLE, val); | ||
493 | } | ||
494 | |||
495 | /* Configure PLL vales */ | 475 | /* Configure PLL vales */ |
496 | if (!bxt_configure_dsi_pll(encoder)) { | 476 | I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl); |
497 | DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n"); | 477 | POSTING_READ(BXT_DSI_PLL_CTL); |
498 | return; | ||
499 | } | ||
500 | 478 | ||
501 | /* Program TX, RX, Dphy clocks */ | 479 | /* Program TX, RX, Dphy clocks */ |
502 | for_each_dsi_port(port, intel_dsi->ports) | 480 | for_each_dsi_port(port, intel_dsi->ports) |
503 | bxt_dsi_program_clocks(encoder->base.dev, port); | 481 | bxt_dsi_program_clocks(encoder->base.dev, port, config); |
504 | 482 | ||
505 | /* Enable DSI PLL */ | 483 | /* Enable DSI PLL */ |
506 | val = I915_READ(BXT_DSI_PLL_ENABLE); | 484 | val = I915_READ(BXT_DSI_PLL_ENABLE); |
@@ -526,14 +504,28 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) | |||
526 | return false; | 504 | return false; |
527 | } | 505 | } |
528 | 506 | ||
529 | void intel_enable_dsi_pll(struct intel_encoder *encoder) | 507 | int intel_compute_dsi_pll(struct intel_encoder *encoder, |
508 | struct intel_crtc_state *config) | ||
509 | { | ||
510 | struct drm_device *dev = encoder->base.dev; | ||
511 | |||
512 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) | ||
513 | return vlv_compute_dsi_pll(encoder, config); | ||
514 | else if (IS_BROXTON(dev)) | ||
515 | return bxt_compute_dsi_pll(encoder, config); | ||
516 | |||
517 | return -ENODEV; | ||
518 | } | ||
519 | |||
520 | void intel_enable_dsi_pll(struct intel_encoder *encoder, | ||
521 | const struct intel_crtc_state *config) | ||
530 | { | 522 | { |
531 | struct drm_device *dev = encoder->base.dev; | 523 | struct drm_device *dev = encoder->base.dev; |
532 | 524 | ||
533 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) | 525 | if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) |
534 | vlv_enable_dsi_pll(encoder); | 526 | vlv_enable_dsi_pll(encoder, config); |
535 | else if (IS_BROXTON(dev)) | 527 | else if (IS_BROXTON(dev)) |
536 | bxt_enable_dsi_pll(encoder); | 528 | bxt_enable_dsi_pll(encoder, config); |
537 | } | 529 | } |
538 | 530 | ||
539 | void intel_disable_dsi_pll(struct intel_encoder *encoder) | 531 | void intel_disable_dsi_pll(struct intel_encoder *encoder) |
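
Editor's note: both get_pclk paths now also capture the raw PLL registers into config->dsi_pll, so the software-computed state can later be compared against what the hardware reports. The cross-check itself lives elsewhere in the driver and is not part of this diff; a purely illustrative sketch of such a comparison over the fields introduced above:

	/* Illustrative only: compare computed vs. read-back DSI PLL state. */
	static bool dsi_pll_state_matches(const struct intel_crtc_state *sw,
					  const struct intel_crtc_state *hw)
	{
		return sw->dsi_pll.ctrl == hw->dsi_pll.ctrl &&
		       sw->dsi_pll.div  == hw->dsi_pll.div;
	}
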
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 73002e901ff2..9d79c4c3e256 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h | |||
@@ -27,8 +27,34 @@ | |||
27 | #include "intel_guc_fwif.h" | 27 | #include "intel_guc_fwif.h" |
28 | #include "i915_guc_reg.h" | 28 | #include "i915_guc_reg.h" |
29 | 29 | ||
30 | struct drm_i915_gem_request; | ||
31 | |||
32 | /* | ||
33 | * This structure primarily describes the GEM object shared with the GuC. | ||
34 | * The GEM object is held for the entire lifetime of our interaction with | ||
35 | * the GuC, being allocated before the GuC is loaded with its firmware. | ||
36 | * Because there's no way to update the address used by the GuC after | ||
37 | * initialisation, the shared object must stay pinned into the GGTT as | ||
38 | * long as the GuC is in use. We also keep the first page (only) mapped | ||
39 | * into kernel address space, as it includes shared data that must be | ||
40 | * updated on every request submission. | ||
41 | * | ||
42 | * The single GEM object described here is actually made up of several | ||
43 | * separate areas, as far as the GuC is concerned. The first page (kept | ||
44 | * kmap'd) includes the "process descriptor" which holds sequence data for | ||
45 | * the doorbell, and one cacheline which actually *is* the doorbell; a | ||
46 | * write to this will "ring the doorbell" (i.e. send an interrupt to the | ||
47 | * GuC). The subsequent pages of the client object constitute the work | ||
48 | * queue (a circular array of work items), again described in the process | ||
49 | * descriptor. Work queue pages are mapped momentarily as required. | ||
50 | * | ||
51 | * Finally, we also keep a few statistics here, including the number of | ||
52 | * submissions to each engine, and a record of the last submission failure | ||
53 | * (if any). | ||
54 | */ | ||
30 | struct i915_guc_client { | 55 | struct i915_guc_client { |
31 | struct drm_i915_gem_object *client_obj; | 56 | struct drm_i915_gem_object *client_obj; |
57 | void *client_base; /* first page (only) of above */ | ||
32 | struct intel_context *owner; | 58 | struct intel_context *owner; |
33 | struct intel_guc *guc; | 59 | struct intel_guc *guc; |
34 | uint32_t priority; | 60 | uint32_t priority; |
@@ -43,13 +69,14 @@ struct i915_guc_client { | |||
43 | uint32_t wq_offset; | 69 | uint32_t wq_offset; |
44 | uint32_t wq_size; | 70 | uint32_t wq_size; |
45 | uint32_t wq_tail; | 71 | uint32_t wq_tail; |
46 | uint32_t wq_head; | 72 | uint32_t unused; /* Was 'wq_head' */ |
47 | 73 | ||
48 | /* GuC submission statistics & status */ | 74 | /* GuC submission statistics & status */ |
49 | uint64_t submissions[GUC_MAX_ENGINES_NUM]; | 75 | uint64_t submissions[GUC_MAX_ENGINES_NUM]; |
50 | uint32_t q_fail; | 76 | uint32_t q_fail; |
51 | uint32_t b_fail; | 77 | uint32_t b_fail; |
52 | int retcode; | 78 | int retcode; |
79 | int spare; /* pad to 32 DWords */ | ||
53 | }; | 80 | }; |
54 | 81 | ||
55 | enum intel_guc_fw_status { | 82 | enum intel_guc_fw_status { |
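
Editor's note: with client_base keeping the first page of the client object permanently mapped, the per-submission accesses described in the comment block above (process descriptor and doorbell cacheline) reduce to pointer arithmetic inside that page. A hedged helper sketch; only client_base itself appears in this hunk, the offset parameter and helper name are assumptions:

	static void *guc_client_first_page_ptr(struct i915_guc_client *client,
					       u32 offset)
	{
		/* offset must lie within the first, permanently mapped page */
		return (char *)client->client_base + offset;
	}
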
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b199ede08f72..2cdab73046f8 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -1412,8 +1412,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
1412 | hdmi_to_dig_port(intel_hdmi)); | 1412 | hdmi_to_dig_port(intel_hdmi)); |
1413 | } | 1413 | } |
1414 | 1414 | ||
1415 | if (!live_status) | 1415 | if (!live_status) { |
1416 | DRM_DEBUG_KMS("Live status not up!"); | 1416 | DRM_DEBUG_KMS("HDMI live status down\n"); |
1417 | /* | ||
1418 | * Live status register is not reliable on all intel platforms. | ||
1419 | * So consider live_status only for certain platforms, for | ||
1420 | * others, read EDID to determine presence of sink. | ||
1421 | */ | ||
1422 | if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv)) | ||
1423 | live_status = true; | ||
1424 | } | ||
1417 | 1425 | ||
1418 | intel_hdmi_unset_edid(connector); | 1426 | intel_hdmi_unset_edid(connector); |
1419 | 1427 | ||
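
Editor's note: the HDMI live-status bit is only trusted where it is known to be reliable; on gen < 7 and on Ivybridge the code fakes the status as up and lets the EDID read decide whether a sink is present. The rule from the hunk above, restated as a standalone predicate:

	/* Sketch: when can the HDMI live-status register be trusted? */
	static bool hdmi_live_status_is_reliable(int gen, bool is_ivybridge)
	{
		return gen >= 7 && !is_ivybridge;
	}
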
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 6dbe73ecb41a..81de23098be7 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -571,15 +571,14 @@ clear_err: | |||
571 | goto out; | 571 | goto out; |
572 | 572 | ||
573 | timeout: | 573 | timeout: |
574 | DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", | 574 | DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", |
575 | bus->adapter.name, bus->reg0 & 0xff); | 575 | bus->adapter.name, bus->reg0 & 0xff); |
576 | I915_WRITE(GMBUS0, 0); | 576 | I915_WRITE(GMBUS0, 0); |
577 | 577 | ||
578 | /* | 578 | /* |
579 | * Hardware may not support GMBUS over these pins? Try GPIO bitbanging | 579 | * Hardware may not support GMBUS over these pins? Try GPIO bitbanging |
580 | * instead. Use EAGAIN to have i2c core retry. | 580 | * instead. Use EAGAIN to have i2c core retry. |
581 | */ | 581 | */ |
582 | bus->force_bit = 1; | ||
583 | ret = -EAGAIN; | 582 | ret = -EAGAIN; |
584 | 583 | ||
585 | out: | 584 | out: |
@@ -597,10 +596,15 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) | |||
597 | intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); | 596 | intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); |
598 | mutex_lock(&dev_priv->gmbus_mutex); | 597 | mutex_lock(&dev_priv->gmbus_mutex); |
599 | 598 | ||
600 | if (bus->force_bit) | 599 | if (bus->force_bit) { |
601 | ret = i2c_bit_algo.master_xfer(adapter, msgs, num); | 600 | ret = i2c_bit_algo.master_xfer(adapter, msgs, num); |
602 | else | 601 | if (ret < 0) |
602 | bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY; | ||
603 | } else { | ||
603 | ret = do_gmbus_xfer(adapter, msgs, num); | 604 | ret = do_gmbus_xfer(adapter, msgs, num); |
605 | if (ret == -EAGAIN) | ||
606 | bus->force_bit |= GMBUS_FORCE_BIT_RETRY; | ||
607 | } | ||
604 | 608 | ||
605 | mutex_unlock(&dev_priv->gmbus_mutex); | 609 | mutex_unlock(&dev_priv->gmbus_mutex); |
606 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); | 610 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); |
@@ -718,11 +722,16 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) | |||
718 | void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) | 722 | void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) |
719 | { | 723 | { |
720 | struct intel_gmbus *bus = to_intel_gmbus(adapter); | 724 | struct intel_gmbus *bus = to_intel_gmbus(adapter); |
725 | struct drm_i915_private *dev_priv = bus->dev_priv; | ||
726 | |||
727 | mutex_lock(&dev_priv->gmbus_mutex); | ||
721 | 728 | ||
722 | bus->force_bit += force_bit ? 1 : -1; | 729 | bus->force_bit += force_bit ? 1 : -1; |
723 | DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", | 730 | DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", |
724 | force_bit ? "en" : "dis", adapter->name, | 731 | force_bit ? "en" : "dis", adapter->name, |
725 | bus->force_bit); | 732 | bus->force_bit); |
733 | |||
734 | mutex_unlock(&dev_priv->gmbus_mutex); | ||
726 | } | 735 | } |
727 | 736 | ||
728 | void intel_teardown_gmbus(struct drm_device *dev) | 737 | void intel_teardown_gmbus(struct drm_device *dev) |
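
Editor's note: a GMBUS timeout no longer switches the bus to bit-banging permanently. gmbus_xfer() tracks the fallback with a GMBUS_FORCE_BIT_RETRY flag and clears it again once a bit-banged transfer also fails, so GMBUS gets another chance later; the flag's value is defined elsewhere in the driver and is not reproduced here. A commented restatement of the state machine from the hunk above:

	if (bus->force_bit) {
		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
		if (ret < 0)
			/* bit-banging failed too: allow GMBUS to be retried */
			bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
	} else {
		ret = do_gmbus_xfer(adapter, msgs, num);
		if (ret == -EAGAIN)
			/* GMBUS timed out: fall back to bit-banging next time */
			bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
	}
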
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 0d6dc5ec4a46..6179b591ee84 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -229,9 +229,6 @@ enum { | |||
229 | 229 | ||
230 | static int intel_lr_context_pin(struct intel_context *ctx, | 230 | static int intel_lr_context_pin(struct intel_context *ctx, |
231 | struct intel_engine_cs *engine); | 231 | struct intel_engine_cs *engine); |
232 | static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine, | ||
233 | struct drm_i915_gem_object *default_ctx_obj); | ||
234 | |||
235 | 232 | ||
236 | /** | 233 | /** |
237 | * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists | 234 | * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists |
@@ -418,6 +415,7 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0, | |||
418 | struct drm_i915_gem_request *rq1) | 415 | struct drm_i915_gem_request *rq1) |
419 | { | 416 | { |
420 | struct drm_i915_private *dev_priv = rq0->i915; | 417 | struct drm_i915_private *dev_priv = rq0->i915; |
418 | unsigned int fw_domains = rq0->engine->fw_domains; | ||
421 | 419 | ||
422 | execlists_update_context(rq0); | 420 | execlists_update_context(rq0); |
423 | 421 | ||
@@ -425,11 +423,11 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0, | |||
425 | execlists_update_context(rq1); | 423 | execlists_update_context(rq1); |
426 | 424 | ||
427 | spin_lock_irq(&dev_priv->uncore.lock); | 425 | spin_lock_irq(&dev_priv->uncore.lock); |
428 | intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); | 426 | intel_uncore_forcewake_get__locked(dev_priv, fw_domains); |
429 | 427 | ||
430 | execlists_elsp_write(rq0, rq1); | 428 | execlists_elsp_write(rq0, rq1); |
431 | 429 | ||
432 | intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); | 430 | intel_uncore_forcewake_put__locked(dev_priv, fw_domains); |
433 | spin_unlock_irq(&dev_priv->uncore.lock); | 431 | spin_unlock_irq(&dev_priv->uncore.lock); |
434 | } | 432 | } |
435 | 433 | ||
@@ -552,7 +550,7 @@ static void intel_lrc_irq_handler(unsigned long data) | |||
552 | unsigned int csb_read = 0, i; | 550 | unsigned int csb_read = 0, i; |
553 | unsigned int submit_contexts = 0; | 551 | unsigned int submit_contexts = 0; |
554 | 552 | ||
555 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 553 | intel_uncore_forcewake_get(dev_priv, engine->fw_domains); |
556 | 554 | ||
557 | status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine)); | 555 | status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine)); |
558 | 556 | ||
@@ -577,7 +575,7 @@ static void intel_lrc_irq_handler(unsigned long data) | |||
577 | _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, | 575 | _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, |
578 | engine->next_context_status_buffer << 8)); | 576 | engine->next_context_status_buffer << 8)); |
579 | 577 | ||
580 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 578 | intel_uncore_forcewake_put(dev_priv, engine->fw_domains); |
581 | 579 | ||
582 | spin_lock(&engine->execlist_lock); | 580 | spin_lock(&engine->execlist_lock); |
583 | 581 | ||
@@ -892,17 +890,8 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes) | |||
892 | */ | 890 | */ |
893 | int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) | 891 | int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) |
894 | { | 892 | { |
895 | struct drm_i915_private *dev_priv; | ||
896 | int ret; | 893 | int ret; |
897 | 894 | ||
898 | WARN_ON(req == NULL); | ||
899 | dev_priv = req->i915; | ||
900 | |||
901 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, | ||
902 | dev_priv->mm.interruptible); | ||
903 | if (ret) | ||
904 | return ret; | ||
905 | |||
906 | ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); | 895 | ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); |
907 | if (ret) | 896 | if (ret) |
908 | return ret; | 897 | return ret; |
@@ -1016,7 +1005,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, | |||
1016 | trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); | 1005 | trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); |
1017 | 1006 | ||
1018 | i915_gem_execbuffer_move_to_active(vmas, params->request); | 1007 | i915_gem_execbuffer_move_to_active(vmas, params->request); |
1019 | i915_gem_execbuffer_retire_commands(params); | ||
1020 | 1008 | ||
1021 | return 0; | 1009 | return 0; |
1022 | } | 1010 | } |
@@ -1057,7 +1045,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine) | |||
1057 | return; | 1045 | return; |
1058 | 1046 | ||
1059 | ret = intel_engine_idle(engine); | 1047 | ret = intel_engine_idle(engine); |
1060 | if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error)) | 1048 | if (ret) |
1061 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | 1049 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", |
1062 | engine->name, ret); | 1050 | engine->name, ret); |
1063 | 1051 | ||
@@ -1093,8 +1081,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx, | |||
1093 | struct drm_i915_private *dev_priv = dev->dev_private; | 1081 | struct drm_i915_private *dev_priv = dev->dev_private; |
1094 | struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; | 1082 | struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; |
1095 | struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf; | 1083 | struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf; |
1096 | struct page *lrc_state_page; | 1084 | void *vaddr; |
1097 | uint32_t *lrc_reg_state; | 1085 | u32 *lrc_reg_state; |
1098 | int ret; | 1086 | int ret; |
1099 | 1087 | ||
1100 | WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); | 1088 | WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); |
@@ -1104,19 +1092,20 @@ static int intel_lr_context_do_pin(struct intel_context *ctx, | |||
1104 | if (ret) | 1092 | if (ret) |
1105 | return ret; | 1093 | return ret; |
1106 | 1094 | ||
1107 | lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); | 1095 | vaddr = i915_gem_object_pin_map(ctx_obj); |
1108 | if (WARN_ON(!lrc_state_page)) { | 1096 | if (IS_ERR(vaddr)) { |
1109 | ret = -ENODEV; | 1097 | ret = PTR_ERR(vaddr); |
1110 | goto unpin_ctx_obj; | 1098 | goto unpin_ctx_obj; |
1111 | } | 1099 | } |
1112 | 1100 | ||
1101 | lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; | ||
1102 | |||
1113 | ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); | 1103 | ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); |
1114 | if (ret) | 1104 | if (ret) |
1115 | goto unpin_ctx_obj; | 1105 | goto unpin_map; |
1116 | 1106 | ||
1117 | ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); | 1107 | ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); |
1118 | intel_lr_context_descriptor_update(ctx, engine); | 1108 | intel_lr_context_descriptor_update(ctx, engine); |
1119 | lrc_reg_state = kmap(lrc_state_page); | ||
1120 | lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; | 1109 | lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; |
1121 | ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; | 1110 | ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; |
1122 | ctx_obj->dirty = true; | 1111 | ctx_obj->dirty = true; |
@@ -1127,6 +1116,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx, | |||
1127 | 1116 | ||
1128 | return ret; | 1117 | return ret; |
1129 | 1118 | ||
1119 | unpin_map: | ||
1120 | i915_gem_object_unpin_map(ctx_obj); | ||
1130 | unpin_ctx_obj: | 1121 | unpin_ctx_obj: |
1131 | i915_gem_object_ggtt_unpin(ctx_obj); | 1122 | i915_gem_object_ggtt_unpin(ctx_obj); |
1132 | 1123 | ||
@@ -1159,7 +1150,7 @@ void intel_lr_context_unpin(struct intel_context *ctx, | |||
1159 | 1150 | ||
1160 | WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); | 1151 | WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); |
1161 | if (--ctx->engine[engine->id].pin_count == 0) { | 1152 | if (--ctx->engine[engine->id].pin_count == 0) { |
1162 | kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state)); | 1153 | i915_gem_object_unpin_map(ctx_obj); |
1163 | intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); | 1154 | intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); |
1164 | i915_gem_object_ggtt_unpin(ctx_obj); | 1155 | i915_gem_object_ggtt_unpin(ctx_obj); |
1165 | ctx->engine[engine->id].lrc_vma = NULL; | 1156 | ctx->engine[engine->id].lrc_vma = NULL; |
@@ -1579,14 +1570,22 @@ out: | |||
1579 | return ret; | 1570 | return ret; |
1580 | } | 1571 | } |
1581 | 1572 | ||
1573 | static void lrc_init_hws(struct intel_engine_cs *engine) | ||
1574 | { | ||
1575 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | ||
1576 | |||
1577 | I915_WRITE(RING_HWS_PGA(engine->mmio_base), | ||
1578 | (u32)engine->status_page.gfx_addr); | ||
1579 | POSTING_READ(RING_HWS_PGA(engine->mmio_base)); | ||
1580 | } | ||
1581 | |||
1582 | static int gen8_init_common_ring(struct intel_engine_cs *engine) | 1582 | static int gen8_init_common_ring(struct intel_engine_cs *engine) |
1583 | { | 1583 | { |
1584 | struct drm_device *dev = engine->dev; | 1584 | struct drm_device *dev = engine->dev; |
1585 | struct drm_i915_private *dev_priv = dev->dev_private; | 1585 | struct drm_i915_private *dev_priv = dev->dev_private; |
1586 | unsigned int next_context_status_buffer_hw; | 1586 | unsigned int next_context_status_buffer_hw; |
1587 | 1587 | ||
1588 | lrc_setup_hardware_status_page(engine, | 1588 | lrc_init_hws(engine); |
1589 | dev_priv->kernel_context->engine[engine->id].state); | ||
1590 | 1589 | ||
1591 | I915_WRITE_IMR(engine, | 1590 | I915_WRITE_IMR(engine, |
1592 | ~(engine->irq_enable_mask | engine->irq_keep_mask)); | 1591 | ~(engine->irq_enable_mask | engine->irq_keep_mask)); |
@@ -1625,7 +1624,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) | |||
1625 | 1624 | ||
1626 | intel_engine_init_hangcheck(engine); | 1625 | intel_engine_init_hangcheck(engine); |
1627 | 1626 | ||
1628 | return 0; | 1627 | return intel_mocs_init_engine(engine); |
1629 | } | 1628 | } |
1630 | 1629 | ||
1631 | static int gen8_init_render_ring(struct intel_engine_cs *engine) | 1630 | static int gen8_init_render_ring(struct intel_engine_cs *engine) |
@@ -1945,15 +1944,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) | |||
1945 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1944 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
1946 | int ret; | 1945 | int ret; |
1947 | 1946 | ||
1948 | ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); | 1947 | ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); |
1949 | if (ret) | 1948 | if (ret) |
1950 | return ret; | 1949 | return ret; |
1951 | 1950 | ||
1951 | /* We're using qword write, seqno should be aligned to 8 bytes. */ | ||
1952 | BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); | ||
1953 | |||
1952 | /* w/a for post sync ops following a GPGPU operation we | 1954 | /* w/a for post sync ops following a GPGPU operation we |
1953 | * need a prior CS_STALL, which is emitted by the flush | 1955 | * need a prior CS_STALL, which is emitted by the flush |
1954 | * following the batch. | 1956 | * following the batch. |
1955 | */ | 1957 | */ |
1956 | intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5)); | 1958 | intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); |
1957 | intel_logical_ring_emit(ringbuf, | 1959 | intel_logical_ring_emit(ringbuf, |
1958 | (PIPE_CONTROL_GLOBAL_GTT_IVB | | 1960 | (PIPE_CONTROL_GLOBAL_GTT_IVB | |
1959 | PIPE_CONTROL_CS_STALL | | 1961 | PIPE_CONTROL_CS_STALL | |
@@ -1961,7 +1963,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) | |||
1961 | intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); | 1963 | intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); |
1962 | intel_logical_ring_emit(ringbuf, 0); | 1964 | intel_logical_ring_emit(ringbuf, 0); |
1963 | intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); | 1965 | intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); |
1966 | /* We're thrashing one dword of HWS. */ | ||
1967 | intel_logical_ring_emit(ringbuf, 0); | ||
1964 | intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); | 1968 | intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); |
1969 | intel_logical_ring_emit(ringbuf, MI_NOOP); | ||
1965 | return intel_logical_ring_advance_and_submit(request); | 1970 | return intel_logical_ring_advance_and_submit(request); |
1966 | } | 1971 | } |
1967 | 1972 | ||
@@ -2048,7 +2053,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) | |||
2048 | i915_gem_batch_pool_fini(&engine->batch_pool); | 2053 | i915_gem_batch_pool_fini(&engine->batch_pool); |
2049 | 2054 | ||
2050 | if (engine->status_page.obj) { | 2055 | if (engine->status_page.obj) { |
2051 | kunmap(sg_page(engine->status_page.obj->pages->sgl)); | 2056 | i915_gem_object_unpin_map(engine->status_page.obj); |
2052 | engine->status_page.obj = NULL; | 2057 | engine->status_page.obj = NULL; |
2053 | } | 2058 | } |
2054 | 2059 | ||
@@ -2087,9 +2092,29 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift) | |||
2087 | } | 2092 | } |
2088 | 2093 | ||
2089 | static int | 2094 | static int |
2095 | lrc_setup_hws(struct intel_engine_cs *engine, | ||
2096 | struct drm_i915_gem_object *dctx_obj) | ||
2097 | { | ||
2098 | void *hws; | ||
2099 | |||
2100 | /* The HWSP is part of the default context object in LRC mode. */ | ||
2101 | engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) + | ||
2102 | LRC_PPHWSP_PN * PAGE_SIZE; | ||
2103 | hws = i915_gem_object_pin_map(dctx_obj); | ||
2104 | if (IS_ERR(hws)) | ||
2105 | return PTR_ERR(hws); | ||
2106 | engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE; | ||
2107 | engine->status_page.obj = dctx_obj; | ||
2108 | |||
2109 | return 0; | ||
2110 | } | ||
2111 | |||
2112 | static int | ||
2090 | logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) | 2113 | logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) |
2091 | { | 2114 | { |
2092 | struct intel_context *dctx = to_i915(dev)->kernel_context; | 2115 | struct drm_i915_private *dev_priv = to_i915(dev); |
2116 | struct intel_context *dctx = dev_priv->kernel_context; | ||
2117 | enum forcewake_domains fw_domains; | ||
2093 | int ret; | 2118 | int ret; |
2094 | 2119 | ||
2095 | /* Intentionally left blank. */ | 2120 | /* Intentionally left blank. */ |
@@ -2111,6 +2136,20 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) | |||
2111 | 2136 | ||
2112 | logical_ring_init_platform_invariants(engine); | 2137 | logical_ring_init_platform_invariants(engine); |
2113 | 2138 | ||
2139 | fw_domains = intel_uncore_forcewake_for_reg(dev_priv, | ||
2140 | RING_ELSP(engine), | ||
2141 | FW_REG_WRITE); | ||
2142 | |||
2143 | fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, | ||
2144 | RING_CONTEXT_STATUS_PTR(engine), | ||
2145 | FW_REG_READ | FW_REG_WRITE); | ||
2146 | |||
2147 | fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, | ||
2148 | RING_CONTEXT_STATUS_BUF_BASE(engine), | ||
2149 | FW_REG_READ); | ||
2150 | |||
2151 | engine->fw_domains = fw_domains; | ||
2152 | |||
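
Editor's note: intel_uncore_forcewake_for_reg() maps a register and the kind of access (FW_REG_READ, FW_REG_WRITE) to the forcewake domains that access needs. The three calls above union the domains for the ELSP write, the context-status pointer read/write and the context-status buffer read, and cache the result for the lifetime of the engine. A usage sketch for some other register of interest; SOME_ENGINE_REG is a placeholder, the surrounding calls appear elsewhere in this diff:

	enum forcewake_domains fw;

	fw = intel_uncore_forcewake_for_reg(dev_priv, SOME_ENGINE_REG,
					    FW_REG_READ | FW_REG_WRITE);
	intel_uncore_forcewake_get(dev_priv, fw);
	/* ... MMIO access to SOME_ENGINE_REG ... */
	intel_uncore_forcewake_put(dev_priv, fw);
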
2114 | ret = i915_cmd_parser_init_ring(engine); | 2153 | ret = i915_cmd_parser_init_ring(engine); |
2115 | if (ret) | 2154 | if (ret) |
2116 | goto error; | 2155 | goto error; |
@@ -2128,6 +2167,13 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) | |||
2128 | goto error; | 2167 | goto error; |
2129 | } | 2168 | } |
2130 | 2169 | ||
2170 | /* And setup the hardware status page. */ | ||
2171 | ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); | ||
2172 | if (ret) { | ||
2173 | DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); | ||
2174 | goto error; | ||
2175 | } | ||
2176 | |||
2131 | return 0; | 2177 | return 0; |
2132 | 2178 | ||
2133 | error: | 2179 | error: |
@@ -2378,15 +2424,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) | |||
2378 | } | 2424 | } |
2379 | 2425 | ||
2380 | static int | 2426 | static int |
2381 | populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, | 2427 | populate_lr_context(struct intel_context *ctx, |
2428 | struct drm_i915_gem_object *ctx_obj, | ||
2382 | struct intel_engine_cs *engine, | 2429 | struct intel_engine_cs *engine, |
2383 | struct intel_ringbuffer *ringbuf) | 2430 | struct intel_ringbuffer *ringbuf) |
2384 | { | 2431 | { |
2385 | struct drm_device *dev = engine->dev; | 2432 | struct drm_device *dev = engine->dev; |
2386 | struct drm_i915_private *dev_priv = dev->dev_private; | 2433 | struct drm_i915_private *dev_priv = dev->dev_private; |
2387 | struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; | 2434 | struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; |
2388 | struct page *page; | 2435 | void *vaddr; |
2389 | uint32_t *reg_state; | 2436 | u32 *reg_state; |
2390 | int ret; | 2437 | int ret; |
2391 | 2438 | ||
2392 | if (!ppgtt) | 2439 | if (!ppgtt) |
@@ -2398,18 +2445,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o | |||
2398 | return ret; | 2445 | return ret; |
2399 | } | 2446 | } |
2400 | 2447 | ||
2401 | ret = i915_gem_object_get_pages(ctx_obj); | 2448 | vaddr = i915_gem_object_pin_map(ctx_obj); |
2402 | if (ret) { | 2449 | if (IS_ERR(vaddr)) { |
2403 | DRM_DEBUG_DRIVER("Could not get object pages\n"); | 2450 | ret = PTR_ERR(vaddr); |
2451 | DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); | ||
2404 | return ret; | 2452 | return ret; |
2405 | } | 2453 | } |
2406 | 2454 | ctx_obj->dirty = true; | |
2407 | i915_gem_object_pin_pages(ctx_obj); | ||
2408 | 2455 | ||
2409 | /* The second page of the context object contains some fields which must | 2456 | /* The second page of the context object contains some fields which must |
2410 | * be set up prior to the first execution. */ | 2457 | * be set up prior to the first execution. */ |
2411 | page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); | 2458 | reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; |
2412 | reg_state = kmap_atomic(page); | ||
2413 | 2459 | ||
2414 | /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM | 2460 | /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM |
2415 | * commands followed by (reg, value) pairs. The values we are setting here are | 2461 | * commands followed by (reg, value) pairs. The values we are setting here are |
@@ -2514,8 +2560,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o | |||
2514 | make_rpcs(dev)); | 2560 | make_rpcs(dev)); |
2515 | } | 2561 | } |
2516 | 2562 | ||
2517 | kunmap_atomic(reg_state); | 2563 | i915_gem_object_unpin_map(ctx_obj); |
2518 | i915_gem_object_unpin_pages(ctx_obj); | ||
2519 | 2564 | ||
2520 | return 0; | 2565 | return 0; |
2521 | } | 2566 | } |
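
Editor's note: populate_lr_context(), like the context pin/unpin and reset paths earlier in this file, replaces the get_pages/kmap/kunmap dance with i915_gem_object_pin_map(), which pins the backing pages and returns a kernel mapping of the whole object. The pattern, reduced to its essentials; every call appears in the hunks above:

	void *vaddr = i915_gem_object_pin_map(ctx_obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;	/* per-context state page */
	/* ... edit reg_state ... */
	ctx_obj->dirty = true;

	i915_gem_object_unpin_map(ctx_obj);
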
@@ -2542,6 +2587,7 @@ void intel_lr_context_free(struct intel_context *ctx) | |||
2542 | if (ctx == ctx->i915->kernel_context) { | 2587 | if (ctx == ctx->i915->kernel_context) { |
2543 | intel_unpin_ringbuffer_obj(ringbuf); | 2588 | intel_unpin_ringbuffer_obj(ringbuf); |
2544 | i915_gem_object_ggtt_unpin(ctx_obj); | 2589 | i915_gem_object_ggtt_unpin(ctx_obj); |
2590 | i915_gem_object_unpin_map(ctx_obj); | ||
2545 | } | 2591 | } |
2546 | 2592 | ||
2547 | WARN_ON(ctx->engine[i].pin_count); | 2593 | WARN_ON(ctx->engine[i].pin_count); |
@@ -2588,24 +2634,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) | |||
2588 | return ret; | 2634 | return ret; |
2589 | } | 2635 | } |
2590 | 2636 | ||
2591 | static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine, | ||
2592 | struct drm_i915_gem_object *default_ctx_obj) | ||
2593 | { | ||
2594 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | ||
2595 | struct page *page; | ||
2596 | |||
2597 | /* The HWSP is part of the default context object in LRC mode. */ | ||
2598 | engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj) | ||
2599 | + LRC_PPHWSP_PN * PAGE_SIZE; | ||
2600 | page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN); | ||
2601 | engine->status_page.page_addr = kmap(page); | ||
2602 | engine->status_page.obj = default_ctx_obj; | ||
2603 | |||
2604 | I915_WRITE(RING_HWS_PGA(engine->mmio_base), | ||
2605 | (u32)engine->status_page.gfx_addr); | ||
2606 | POSTING_READ(RING_HWS_PGA(engine->mmio_base)); | ||
2607 | } | ||
2608 | |||
2609 | /** | 2637 | /** |
2610 | * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context | 2638 | * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context |
2611 | * @ctx: LR context to create. | 2639 | * @ctx: LR context to create. |
@@ -2669,13 +2697,12 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, | |||
2669 | } | 2697 | } |
2670 | 2698 | ||
2671 | ret = engine->init_context(req); | 2699 | ret = engine->init_context(req); |
2700 | i915_add_request_no_flush(req); | ||
2672 | if (ret) { | 2701 | if (ret) { |
2673 | DRM_ERROR("ring init context: %d\n", | 2702 | DRM_ERROR("ring init context: %d\n", |
2674 | ret); | 2703 | ret); |
2675 | i915_gem_request_cancel(req); | ||
2676 | goto error_ringbuf; | 2704 | goto error_ringbuf; |
2677 | } | 2705 | } |
2678 | i915_add_request_no_flush(req); | ||
2679 | } | 2706 | } |
2680 | return 0; | 2707 | return 0; |
2681 | 2708 | ||
@@ -2688,10 +2715,9 @@ error_deref_obj: | |||
2688 | return ret; | 2715 | return ret; |
2689 | } | 2716 | } |
2690 | 2717 | ||
2691 | void intel_lr_context_reset(struct drm_device *dev, | 2718 | void intel_lr_context_reset(struct drm_i915_private *dev_priv, |
2692 | struct intel_context *ctx) | 2719 | struct intel_context *ctx) |
2693 | { | 2720 | { |
2694 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2695 | struct intel_engine_cs *engine; | 2721 | struct intel_engine_cs *engine; |
2696 | 2722 | ||
2697 | for_each_engine(engine, dev_priv) { | 2723 | for_each_engine(engine, dev_priv) { |
@@ -2699,23 +2725,23 @@ void intel_lr_context_reset(struct drm_device *dev, | |||
2699 | ctx->engine[engine->id].state; | 2725 | ctx->engine[engine->id].state; |
2700 | struct intel_ringbuffer *ringbuf = | 2726 | struct intel_ringbuffer *ringbuf = |
2701 | ctx->engine[engine->id].ringbuf; | 2727 | ctx->engine[engine->id].ringbuf; |
2728 | void *vaddr; | ||
2702 | uint32_t *reg_state; | 2729 | uint32_t *reg_state; |
2703 | struct page *page; | ||
2704 | 2730 | ||
2705 | if (!ctx_obj) | 2731 | if (!ctx_obj) |
2706 | continue; | 2732 | continue; |
2707 | 2733 | ||
2708 | if (i915_gem_object_get_pages(ctx_obj)) { | 2734 | vaddr = i915_gem_object_pin_map(ctx_obj); |
2709 | WARN(1, "Failed get_pages for context obj\n"); | 2735 | if (WARN_ON(IS_ERR(vaddr))) |
2710 | continue; | 2736 | continue; |
2711 | } | 2737 | |
2712 | page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); | 2738 | reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; |
2713 | reg_state = kmap_atomic(page); | 2739 | ctx_obj->dirty = true; |
2714 | 2740 | ||
2715 | reg_state[CTX_RING_HEAD+1] = 0; | 2741 | reg_state[CTX_RING_HEAD+1] = 0; |
2716 | reg_state[CTX_RING_TAIL+1] = 0; | 2742 | reg_state[CTX_RING_TAIL+1] = 0; |
2717 | 2743 | ||
2718 | kunmap_atomic(reg_state); | 2744 | i915_gem_object_unpin_map(ctx_obj); |
2719 | 2745 | ||
2720 | ringbuf->head = 0; | 2746 | ringbuf->head = 0; |
2721 | ringbuf->tail = 0; | 2747 | ringbuf->tail = 0; |
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 0b0853eee91e..461f1ef9b5c1 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #ifndef _INTEL_LRC_H_ | 24 | #ifndef _INTEL_LRC_H_ |
25 | #define _INTEL_LRC_H_ | 25 | #define _INTEL_LRC_H_ |
26 | 26 | ||
27 | #include "intel_ringbuffer.h" | ||
28 | |||
27 | #define GEN8_LR_CONTEXT_ALIGN 4096 | 29 | #define GEN8_LR_CONTEXT_ALIGN 4096 |
28 | 30 | ||
29 | /* Execlists regs */ | 31 | /* Execlists regs */ |
@@ -34,6 +36,7 @@ | |||
34 | #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) | 36 | #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) |
35 | #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) | 37 | #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) |
36 | #define CTX_CTRL_RS_CTX_ENABLE (1 << 1) | 38 | #define CTX_CTRL_RS_CTX_ENABLE (1 << 1) |
39 | #define RING_CONTEXT_STATUS_BUF_BASE(ring) _MMIO((ring)->mmio_base + 0x370) | ||
37 | #define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8) | 40 | #define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8) |
38 | #define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4) | 41 | #define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4) |
39 | #define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0) | 42 | #define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0) |
@@ -103,8 +106,11 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, | |||
103 | struct intel_engine_cs *engine); | 106 | struct intel_engine_cs *engine); |
104 | void intel_lr_context_unpin(struct intel_context *ctx, | 107 | void intel_lr_context_unpin(struct intel_context *ctx, |
105 | struct intel_engine_cs *engine); | 108 | struct intel_engine_cs *engine); |
106 | void intel_lr_context_reset(struct drm_device *dev, | 109 | |
107 | struct intel_context *ctx); | 110 | struct drm_i915_private; |
111 | |||
112 | void intel_lr_context_reset(struct drm_i915_private *dev_priv, | ||
113 | struct intel_context *ctx); | ||
108 | uint64_t intel_lr_context_descriptor(struct intel_context *ctx, | 114 | uint64_t intel_lr_context_descriptor(struct intel_context *ctx, |
109 | struct intel_engine_cs *engine); | 115 | struct intel_engine_cs *engine); |
110 | 116 | ||
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 7c7ac0aa192a..23b8545ad6b0 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c | |||
@@ -128,9 +128,9 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { | |||
128 | 128 | ||
129 | /** | 129 | /** |
130 | * get_mocs_settings() | 130 | * get_mocs_settings() |
131 | * @dev: DRM device. | 131 | * @dev_priv: i915 device. |
132 | * @table: Output table that will be made to point at appropriate | 132 | * @table: Output table that will be made to point at appropriate |
133 | * MOCS values for the device. | 133 | * MOCS values for the device. |
134 | * | 134 | * |
135 | * This function will return the values of the MOCS table that needs to | 135 | * This function will return the values of the MOCS table that needs to |
136 | * be programmed for the platform. It will return the values that need | 136 | * be programmed for the platform. It will return the values that need |
@@ -138,21 +138,21 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { | |||
138 | * | 138 | * |
139 | * Return: true if there are applicable MOCS settings for the device. | 139 | * Return: true if there are applicable MOCS settings for the device. |
140 | */ | 140 | */ |
141 | static bool get_mocs_settings(struct drm_device *dev, | 141 | static bool get_mocs_settings(struct drm_i915_private *dev_priv, |
142 | struct drm_i915_mocs_table *table) | 142 | struct drm_i915_mocs_table *table) |
143 | { | 143 | { |
144 | bool result = false; | 144 | bool result = false; |
145 | 145 | ||
146 | if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { | 146 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { |
147 | table->size = ARRAY_SIZE(skylake_mocs_table); | 147 | table->size = ARRAY_SIZE(skylake_mocs_table); |
148 | table->table = skylake_mocs_table; | 148 | table->table = skylake_mocs_table; |
149 | result = true; | 149 | result = true; |
150 | } else if (IS_BROXTON(dev)) { | 150 | } else if (IS_BROXTON(dev_priv)) { |
151 | table->size = ARRAY_SIZE(broxton_mocs_table); | 151 | table->size = ARRAY_SIZE(broxton_mocs_table); |
152 | table->table = broxton_mocs_table; | 152 | table->table = broxton_mocs_table; |
153 | result = true; | 153 | result = true; |
154 | } else { | 154 | } else { |
155 | WARN_ONCE(INTEL_INFO(dev)->gen >= 9, | 155 | WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, |
156 | "Platform that should have a MOCS table does not.\n"); | 156 | "Platform that should have a MOCS table does not.\n"); |
157 | } | 157 | } |
158 | 158 | ||
@@ -179,10 +179,49 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index) | |||
179 | } | 179 | } |
180 | 180 | ||
181 | /** | 181 | /** |
182 | * intel_mocs_init_engine() - emit the mocs control table | ||
183 | * @engine: The engine for whom to emit the registers. | ||
184 | * | ||
185 | * This function simply emits a MI_LOAD_REGISTER_IMM command for the | ||
186 | * given table starting at the given address. | ||
187 | * | ||
188 | * Return: 0 on success, otherwise the error status. | ||
189 | */ | ||
190 | int intel_mocs_init_engine(struct intel_engine_cs *engine) | ||
191 | { | ||
192 | struct drm_i915_private *dev_priv = to_i915(engine->dev); | ||
193 | struct drm_i915_mocs_table table; | ||
194 | unsigned int index; | ||
195 | |||
196 | if (!get_mocs_settings(dev_priv, &table)) | ||
197 | return 0; | ||
198 | |||
199 | if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES)) | ||
200 | return -ENODEV; | ||
201 | |||
202 | for (index = 0; index < table.size; index++) | ||
203 | I915_WRITE(mocs_register(engine->id, index), | ||
204 | table.table[index].control_value); | ||
205 | |||
206 | /* | ||
207 | * Ok, now set the unused entries to uncached. These entries | ||
208 | * are officially undefined and no contract for the contents | ||
209 | * and settings is given for these entries. | ||
210 | * | ||
211 | * Entry 0 in the table is uncached - so we are just writing | ||
212 | * that value to all the used entries. | ||
213 | */ | ||
214 | for (; index < GEN9_NUM_MOCS_ENTRIES; index++) | ||
215 | I915_WRITE(mocs_register(engine->id, index), | ||
216 | table.table[0].control_value); | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | /** | ||
182 | * emit_mocs_control_table() - emit the mocs control table | 222 | * emit_mocs_control_table() - emit the mocs control table |
183 | * @req: Request to set up the MOCS table for. | 223 | * @req: Request to set up the MOCS table for. |
184 | * @table: The values to program into the control regs. | 224 | * @table: The values to program into the control regs. |
185 | * @ring: The engine for whom to emit the registers. | ||
186 | * | 225 | * |
187 | * This function simply emits a MI_LOAD_REGISTER_IMM command for the | 226 | * This function simply emits a MI_LOAD_REGISTER_IMM command for the |
188 | * given table starting at the given address. | 227 | * given table starting at the given address. |
@@ -190,10 +229,10 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index) | |||
190 | * Return: 0 on success, otherwise the error status. | 229 | * Return: 0 on success, otherwise the error status. |
191 | */ | 230 | */ |
192 | static int emit_mocs_control_table(struct drm_i915_gem_request *req, | 231 | static int emit_mocs_control_table(struct drm_i915_gem_request *req, |
193 | const struct drm_i915_mocs_table *table, | 232 | const struct drm_i915_mocs_table *table) |
194 | enum intel_engine_id ring) | ||
195 | { | 233 | { |
196 | struct intel_ringbuffer *ringbuf = req->ringbuf; | 234 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
235 | enum intel_engine_id engine = req->engine->id; | ||
197 | unsigned int index; | 236 | unsigned int index; |
198 | int ret; | 237 | int ret; |
199 | 238 | ||
@@ -210,7 +249,8 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, | |||
210 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); | 249 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); |
211 | 250 | ||
212 | for (index = 0; index < table->size; index++) { | 251 | for (index = 0; index < table->size; index++) { |
213 | intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index)); | 252 | intel_logical_ring_emit_reg(ringbuf, |
253 | mocs_register(engine, index)); | ||
214 | intel_logical_ring_emit(ringbuf, | 254 | intel_logical_ring_emit(ringbuf, |
215 | table->table[index].control_value); | 255 | table->table[index].control_value); |
216 | } | 256 | } |
@@ -224,8 +264,10 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, | |||
224 | * that value to all the used entries. | 264 | * that value to all the used entries. |
225 | */ | 265 | */ |
226 | for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { | 266 | for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { |
227 | intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index)); | 267 | intel_logical_ring_emit_reg(ringbuf, |
228 | intel_logical_ring_emit(ringbuf, table->table[0].control_value); | 268 | mocs_register(engine, index)); |
269 | intel_logical_ring_emit(ringbuf, | ||
270 | table->table[0].control_value); | ||
229 | } | 271 | } |
230 | 272 | ||
231 | intel_logical_ring_emit(ringbuf, MI_NOOP); | 273 | intel_logical_ring_emit(ringbuf, MI_NOOP); |
@@ -234,6 +276,14 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, | |||
234 | return 0; | 276 | return 0; |
235 | } | 277 | } |
236 | 278 | ||
279 | static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, | ||
280 | u16 low, | ||
281 | u16 high) | ||
282 | { | ||
283 | return table->table[low].l3cc_value | | ||
284 | table->table[high].l3cc_value << 16; | ||
285 | } | ||
286 | |||
237 | /** | 287 | /** |
238 | * emit_mocs_l3cc_table() - emit the mocs control table | 288 | * emit_mocs_l3cc_table() - emit the mocs control table |
239 | * @req: Request to set up the MOCS table for. | 289 | * @req: Request to set up the MOCS table for. |
@@ -249,11 +299,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | |||
249 | const struct drm_i915_mocs_table *table) | 299 | const struct drm_i915_mocs_table *table) |
250 | { | 300 | { |
251 | struct intel_ringbuffer *ringbuf = req->ringbuf; | 301 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
252 | unsigned int count; | ||
253 | unsigned int i; | 302 | unsigned int i; |
254 | u32 value; | ||
255 | u32 filler = (table->table[0].l3cc_value & 0xffff) | | ||
256 | ((table->table[0].l3cc_value & 0xffff) << 16); | ||
257 | int ret; | 303 | int ret; |
258 | 304 | ||
259 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) | 305 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) |
@@ -268,20 +314,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | |||
268 | intel_logical_ring_emit(ringbuf, | 314 | intel_logical_ring_emit(ringbuf, |
269 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); | 315 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); |
270 | 316 | ||
271 | for (i = 0, count = 0; i < table->size / 2; i++, count += 2) { | 317 | for (i = 0; i < table->size/2; i++) { |
272 | value = (table->table[count].l3cc_value & 0xffff) | | ||
273 | ((table->table[count + 1].l3cc_value & 0xffff) << 16); | ||
274 | |||
275 | intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); | 318 | intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); |
276 | intel_logical_ring_emit(ringbuf, value); | 319 | intel_logical_ring_emit(ringbuf, |
320 | l3cc_combine(table, 2*i, 2*i+1)); | ||
277 | } | 321 | } |
278 | 322 | ||
279 | if (table->size & 0x01) { | 323 | if (table->size & 0x01) { |
280 | /* Odd table size - 1 left over */ | 324 | /* Odd table size - 1 left over */ |
281 | value = (table->table[count].l3cc_value & 0xffff) | | 325 | intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); |
282 | ((table->table[0].l3cc_value & 0xffff) << 16); | 326 | intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0)); |
283 | } else | 327 | i++; |
284 | value = filler; | 328 | } |
285 | 329 | ||
286 | /* | 330 | /* |
287 | * Now set the rest of the table to uncached - use entry 0 as | 331 | * Now set the rest of the table to uncached - use entry 0 as |
@@ -290,9 +334,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | |||
290 | */ | 334 | */ |
291 | for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { | 335 | for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { |
292 | intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); | 336 | intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); |
293 | intel_logical_ring_emit(ringbuf, value); | 337 | intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0)); |
294 | |||
295 | value = filler; | ||
296 | } | 338 | } |
297 | 339 | ||
298 | intel_logical_ring_emit(ringbuf, MI_NOOP); | 340 | intel_logical_ring_emit(ringbuf, MI_NOOP); |
@@ -302,6 +344,47 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | |||
302 | } | 344 | } |
303 | 345 | ||
304 | /** | 346 | /** |
347 | * intel_mocs_init_l3cc_table() - program the mocs control table | ||
348 | * @dev: The device to be programmed. | ||
349 | * | ||
350 | * This function simply programs the mocs registers for the given table. | ||
351 | * This register set is programmed in pairs. | ||
352 | * | ||
353 | * These registers may get programmed more than once; it is simpler to | ||
354 | * re-program 32 registers than maintain the state of when they were programmed. | ||
355 | * We are always reprogramming with the same values, and this happens only | ||
356 | * at context start. | ||
357 | * | ||
358 | * Return: Nothing. | ||
359 | */ | ||
360 | void intel_mocs_init_l3cc_table(struct drm_device *dev) | ||
361 | { | ||
362 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
363 | struct drm_i915_mocs_table table; | ||
364 | unsigned int i; | ||
365 | |||
366 | if (!get_mocs_settings(dev_priv, &table)) | ||
367 | return; | ||
368 | |||
369 | for (i = 0; i < table.size/2; i++) | ||
370 | I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1)); | ||
371 | |||
372 | /* Odd table size - 1 left over */ | ||
373 | if (table.size & 0x01) { | ||
374 | I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0)); | ||
375 | i++; | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * Now set the rest of the table to uncached - use entry 0 as | ||
380 | * this will be uncached. Leave the last pair as initialised as | ||
381 | * they are reserved by the hardware. | ||
382 | */ | ||
383 | for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++) | ||
384 | I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0)); | ||
385 | } | ||
386 | |||
387 | /** | ||
305 | * intel_rcs_context_init_mocs() - program the MOCS register. | 388 | * intel_rcs_context_init_mocs() - program the MOCS register. |
306 | * @req: Request to set up the MOCS tables for. | 389 | * @req: Request to set up the MOCS tables for. |
307 | * | 390 | * |
@@ -322,17 +405,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req) | |||
322 | struct drm_i915_mocs_table t; | 405 | struct drm_i915_mocs_table t; |
323 | int ret; | 406 | int ret; |
324 | 407 | ||
325 | if (get_mocs_settings(req->engine->dev, &t)) { | 408 | if (get_mocs_settings(req->i915, &t)) { |
326 | struct drm_i915_private *dev_priv = req->i915; | 409 | /* Program the RCS control registers */ |
327 | struct intel_engine_cs *engine; | 410 | ret = emit_mocs_control_table(req, &t); |
328 | enum intel_engine_id id; | 411 | if (ret) |
329 | 412 | return ret; | |
330 | /* Program the control registers */ | ||
331 | for_each_engine_id(engine, dev_priv, id) { | ||
332 | ret = emit_mocs_control_table(req, &t, id); | ||
333 | if (ret) | ||
334 | return ret; | ||
335 | } | ||
336 | 413 | ||
337 | /* Now program the l3cc registers */ | 414 | /* Now program the l3cc registers */ |
338 | ret = emit_mocs_l3cc_table(req, &t); | 415 | ret = emit_mocs_l3cc_table(req, &t); |
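The intel_mocs.c changes above centralise the value packing in l3cc_combine(): each GEN9_LNCFCMOCS register carries two 16-bit l3cc entries (the even-indexed entry in bits 15:0, the odd one in bits 31:16), an odd-sized table pairs its last entry with entry 0, and every remaining register is filled with entry 0, which is uncached. Below is a minimal user-space sketch of that pairing; the three-entry table and NUM_MOCS_ENTRIES value are made up for illustration, and the explicit 0xffff masks are added here for clarity rather than copied from the kernel helper.

#include <stdint.h>
#include <stdio.h>

#define NUM_MOCS_ENTRIES 8  /* stand-in for GEN9_NUM_MOCS_ENTRIES, which is larger */

struct mocs_entry {
    uint32_t l3cc_value;
};

/* Pack two 16-bit l3cc entries into one 32-bit LNCFCMOCS-style value. */
static uint32_t l3cc_combine(const struct mocs_entry *t,
                             unsigned int low, unsigned int high)
{
    return (t[low].l3cc_value & 0xffff) | ((t[high].l3cc_value & 0xffff) << 16);
}

int main(void)
{
    /* entry 0 is the uncached entry; the other values are arbitrary examples */
    struct mocs_entry table[] = { { 0x0010 }, { 0x0030 }, { 0x003b } };
    unsigned int size = sizeof(table) / sizeof(table[0]);  /* odd on purpose */
    unsigned int i;

    for (i = 0; i < size / 2; i++)
        printf("LNCFCMOCS(%u) = 0x%08x\n", i, l3cc_combine(table, 2 * i, 2 * i + 1));

    if (size & 1) {  /* odd table: pair the last entry with entry 0 */
        printf("LNCFCMOCS(%u) = 0x%08x\n", i, l3cc_combine(table, 2 * i, 0));
        i++;
    }

    /* remaining register pairs are filled with the uncached entry 0 */
    for (; i < NUM_MOCS_ENTRIES / 2; i++)
        printf("LNCFCMOCS(%u) = 0x%08x\n", i, l3cc_combine(table, 0, 0));

    return 0;
}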
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h index 76e45b1748b3..4640299e04ec 100644 --- a/drivers/gpu/drm/i915/intel_mocs.h +++ b/drivers/gpu/drm/i915/intel_mocs.h | |||
@@ -53,5 +53,7 @@ | |||
53 | #include "i915_drv.h" | 53 | #include "i915_drv.h" |
54 | 54 | ||
55 | int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); | 55 | int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); |
56 | void intel_mocs_init_l3cc_table(struct drm_device *dev); | ||
57 | int intel_mocs_init_engine(struct intel_engine_cs *ring); | ||
56 | 58 | ||
57 | #endif | 59 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index c15718b4862a..99e26034ae8d 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -34,12 +34,6 @@ | |||
34 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
35 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
36 | 36 | ||
37 | #define PCI_ASLE 0xe4 | ||
38 | #define PCI_ASLS 0xfc | ||
39 | #define PCI_SWSCI 0xe8 | ||
40 | #define PCI_SWSCI_SCISEL (1 << 15) | ||
41 | #define PCI_SWSCI_GSSCIE (1 << 0) | ||
42 | |||
43 | #define OPREGION_HEADER_OFFSET 0 | 37 | #define OPREGION_HEADER_OFFSET 0 |
44 | #define OPREGION_ACPI_OFFSET 0x100 | 38 | #define OPREGION_ACPI_OFFSET 0x100 |
45 | #define ACPI_CLID 0x01ac /* current lid state indicator */ | 39 | #define ACPI_CLID 0x01ac /* current lid state indicator */ |
@@ -246,13 +240,12 @@ struct opregion_asle_ext { | |||
246 | 240 | ||
247 | #define MAX_DSLP 1500 | 241 | #define MAX_DSLP 1500 |
248 | 242 | ||
249 | #ifdef CONFIG_ACPI | ||
250 | static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | 243 | static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) |
251 | { | 244 | { |
252 | struct drm_i915_private *dev_priv = dev->dev_private; | 245 | struct drm_i915_private *dev_priv = dev->dev_private; |
253 | struct opregion_swsci *swsci = dev_priv->opregion.swsci; | 246 | struct opregion_swsci *swsci = dev_priv->opregion.swsci; |
254 | u32 main_function, sub_function, scic; | 247 | u32 main_function, sub_function, scic; |
255 | u16 pci_swsci; | 248 | u16 swsci_val; |
256 | u32 dslp; | 249 | u32 dslp; |
257 | 250 | ||
258 | if (!swsci) | 251 | if (!swsci) |
@@ -300,16 +293,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
300 | swsci->scic = scic; | 293 | swsci->scic = scic; |
301 | 294 | ||
302 | /* Ensure SCI event is selected and event trigger is cleared. */ | 295 | /* Ensure SCI event is selected and event trigger is cleared. */ |
303 | pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci); | 296 | pci_read_config_word(dev->pdev, SWSCI, &swsci_val); |
304 | if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) { | 297 | if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) { |
305 | pci_swsci |= PCI_SWSCI_SCISEL; | 298 | swsci_val |= SWSCI_SCISEL; |
306 | pci_swsci &= ~PCI_SWSCI_GSSCIE; | 299 | swsci_val &= ~SWSCI_GSSCIE; |
307 | pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci); | 300 | pci_write_config_word(dev->pdev, SWSCI, swsci_val); |
308 | } | 301 | } |
309 | 302 | ||
310 | /* Use event trigger to tell bios to check the mail. */ | 303 | /* Use event trigger to tell bios to check the mail. */ |
311 | pci_swsci |= PCI_SWSCI_GSSCIE; | 304 | swsci_val |= SWSCI_GSSCIE; |
312 | pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci); | 305 | pci_write_config_word(dev->pdev, SWSCI, swsci_val); |
313 | 306 | ||
314 | /* Poll for the result. */ | 307 | /* Poll for the result. */ |
315 | #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) | 308 | #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) |
@@ -905,9 +898,6 @@ static void swsci_setup(struct drm_device *dev) | |||
905 | opregion->swsci_gbda_sub_functions, | 898 | opregion->swsci_gbda_sub_functions, |
906 | opregion->swsci_sbcb_sub_functions); | 899 | opregion->swsci_sbcb_sub_functions); |
907 | } | 900 | } |
908 | #else /* CONFIG_ACPI */ | ||
909 | static inline void swsci_setup(struct drm_device *dev) {} | ||
910 | #endif /* CONFIG_ACPI */ | ||
911 | 901 | ||
912 | static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) | 902 | static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) |
913 | { | 903 | { |
@@ -943,16 +933,14 @@ int intel_opregion_setup(struct drm_device *dev) | |||
943 | BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); | 933 | BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); |
944 | BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); | 934 | BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); |
945 | 935 | ||
946 | pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); | 936 | pci_read_config_dword(dev->pdev, ASLS, &asls); |
947 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); | 937 | DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); |
948 | if (asls == 0) { | 938 | if (asls == 0) { |
949 | DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); | 939 | DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); |
950 | return -ENOTSUPP; | 940 | return -ENOTSUPP; |
951 | } | 941 | } |
952 | 942 | ||
953 | #ifdef CONFIG_ACPI | ||
954 | INIT_WORK(&opregion->asle_work, asle_work); | 943 | INIT_WORK(&opregion->asle_work, asle_work); |
955 | #endif | ||
956 | 944 | ||
957 | base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB); | 945 | base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB); |
958 | if (!base) | 946 | if (!base) |
@@ -1024,3 +1012,31 @@ err_out: | |||
1024 | memunmap(base); | 1012 | memunmap(base); |
1025 | return err; | 1013 | return err; |
1026 | } | 1014 | } |
1015 | |||
1016 | int | ||
1017 | intel_opregion_get_panel_type(struct drm_device *dev) | ||
1018 | { | ||
1019 | u32 panel_details; | ||
1020 | int ret; | ||
1021 | |||
1022 | ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); | ||
1023 | if (ret) { | ||
1024 | DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n", | ||
1025 | ret); | ||
1026 | return ret; | ||
1027 | } | ||
1028 | |||
1029 | ret = (panel_details >> 8) & 0xff; | ||
1030 | if (ret > 0x10) { | ||
1031 | DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret); | ||
1032 | return -EINVAL; | ||
1033 | } | ||
1034 | |||
1035 | /* fall back to VBT panel type? */ | ||
1036 | if (ret == 0x0) { | ||
1037 | DRM_DEBUG_KMS("No panel type in OpRegion\n"); | ||
1038 | return -ENODEV; | ||
1039 | } | ||
1040 | |||
1041 | return ret - 1; | ||
1042 | } | ||
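The new intel_opregion_get_panel_type() asks the BIOS for panel details via SWSCI_GBDA_PANEL_DETAILS and decodes the reply: the panel type sits in bits 15:8 of the returned word, zero means the field is not populated, anything above 0x10 is rejected, and the result is returned zero-based so it can index the VBT panel tables. Here is a hedged sketch of just the decoding step; decode_panel_type() and the example word are illustrative, not part of the driver.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Decode the panel type from a GBDA panel-details reply word. */
static int decode_panel_type(uint32_t panel_details)
{
    int type = (panel_details >> 8) & 0xff;

    if (type > 0x10)
        return -EINVAL;  /* outside the documented range */
    if (type == 0)
        return -ENODEV;  /* OpRegion does not report a panel type */

    return type - 1;     /* VBT panel tables are indexed from zero */
}

int main(void)
{
    uint32_t example = 0x00000300;  /* hypothetical reply: panel type 3 */

    printf("panel type index = %d\n", decode_panel_type(example));
    return 0;
}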
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 6694e9230cd5..bcc3b6a016d8 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) | |||
247 | 247 | ||
248 | ret = intel_ring_begin(req, 4); | 248 | ret = intel_ring_begin(req, 4); |
249 | if (ret) { | 249 | if (ret) { |
250 | i915_gem_request_cancel(req); | 250 | i915_add_request_no_flush(req); |
251 | return ret; | 251 | return ret; |
252 | } | 252 | } |
253 | 253 | ||
@@ -290,7 +290,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
290 | 290 | ||
291 | ret = intel_ring_begin(req, 2); | 291 | ret = intel_ring_begin(req, 2); |
292 | if (ret) { | 292 | if (ret) { |
293 | i915_gem_request_cancel(req); | 293 | i915_add_request_no_flush(req); |
294 | return ret; | 294 | return ret; |
295 | } | 295 | } |
296 | 296 | ||
@@ -356,7 +356,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
356 | 356 | ||
357 | ret = intel_ring_begin(req, 6); | 357 | ret = intel_ring_begin(req, 6); |
358 | if (ret) { | 358 | if (ret) { |
359 | i915_gem_request_cancel(req); | 359 | i915_add_request_no_flush(req); |
360 | return ret; | 360 | return ret; |
361 | } | 361 | } |
362 | 362 | ||
@@ -431,7 +431,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | |||
431 | 431 | ||
432 | ret = intel_ring_begin(req, 2); | 432 | ret = intel_ring_begin(req, 2); |
433 | if (ret) { | 433 | if (ret) { |
434 | i915_gem_request_cancel(req); | 434 | i915_add_request_no_flush(req); |
435 | return ret; | 435 | return ret; |
436 | } | 436 | } |
437 | 437 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 8c8996fcbaf5..a0788763757b 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector) | |||
504 | if (panel->backlight.combination_mode) { | 504 | if (panel->backlight.combination_mode) { |
505 | u8 lbpc; | 505 | u8 lbpc; |
506 | 506 | ||
507 | pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc); | 507 | pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc); |
508 | val *= lbpc; | 508 | val *= lbpc; |
509 | } | 509 | } |
510 | 510 | ||
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level) | |||
592 | 592 | ||
593 | lbpc = level * 0xfe / panel->backlight.max + 1; | 593 | lbpc = level * 0xfe / panel->backlight.max + 1; |
594 | level /= lbpc; | 594 | level /= lbpc; |
595 | pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc); | 595 | pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc); |
596 | } | 596 | } |
597 | 597 | ||
598 | if (IS_GEN4(dev_priv)) { | 598 | if (IS_GEN4(dev_priv)) { |
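The intel_panel.c hunks only rename PCI_LBPC to the LBPC definition that now lives with the other PCI config offsets, but the surrounding combination-mode code is worth a worked example: the requested level is split into an LBPC multiplier written through PCI config space plus a residual duty cycle, and the read path multiplies the two back together. A rough arithmetic sketch follows; the PWM maximum and requested level are invented values.

#include <stdio.h>

int main(void)
{
    unsigned int max = 4648;   /* assumed PWM maximum, not a real panel value */
    unsigned int level = 2000; /* requested backlight level */

    /* split as in i9xx_set_backlight(): LBPC scaler plus residual duty cycle */
    unsigned int lbpc = level * 0xfe / max + 1;
    unsigned int duty = level / lbpc;

    /* the read path (i9xx_get_backlight()) multiplies the two back together */
    printf("lbpc=%u duty=%u recombined=%u (requested %u)\n",
           lbpc, duty, duty * lbpc, level);
    return 0;
}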
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 43b24a1f5ee6..695a464a5e64 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2483,7 +2483,7 @@ static void ilk_wm_merge(struct drm_device *dev, | |||
2483 | /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ | 2483 | /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ |
2484 | if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && | 2484 | if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && |
2485 | config->num_pipes_active > 1) | 2485 | config->num_pipes_active > 1) |
2486 | return; | 2486 | last_enabled_level = 0; |
2487 | 2487 | ||
2488 | /* ILK: FBC WM must be disabled always */ | 2488 | /* ILK: FBC WM must be disabled always */ |
2489 | merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; | 2489 | merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; |
@@ -4587,7 +4587,7 @@ void intel_set_rps(struct drm_device *dev, u8 val) | |||
4587 | gen6_set_rps(dev, val); | 4587 | gen6_set_rps(dev, val); |
4588 | } | 4588 | } |
4589 | 4589 | ||
4590 | static void gen9_disable_rps(struct drm_device *dev) | 4590 | static void gen9_disable_rc6(struct drm_device *dev) |
4591 | { | 4591 | { |
4592 | struct drm_i915_private *dev_priv = dev->dev_private; | 4592 | struct drm_i915_private *dev_priv = dev->dev_private; |
4593 | 4593 | ||
@@ -4595,12 +4595,20 @@ static void gen9_disable_rps(struct drm_device *dev) | |||
4595 | I915_WRITE(GEN9_PG_ENABLE, 0); | 4595 | I915_WRITE(GEN9_PG_ENABLE, 0); |
4596 | } | 4596 | } |
4597 | 4597 | ||
4598 | static void gen9_disable_rps(struct drm_device *dev) | ||
4599 | { | ||
4600 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4601 | |||
4602 | I915_WRITE(GEN6_RP_CONTROL, 0); | ||
4603 | } | ||
4604 | |||
4598 | static void gen6_disable_rps(struct drm_device *dev) | 4605 | static void gen6_disable_rps(struct drm_device *dev) |
4599 | { | 4606 | { |
4600 | struct drm_i915_private *dev_priv = dev->dev_private; | 4607 | struct drm_i915_private *dev_priv = dev->dev_private; |
4601 | 4608 | ||
4602 | I915_WRITE(GEN6_RC_CONTROL, 0); | 4609 | I915_WRITE(GEN6_RC_CONTROL, 0); |
4603 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | 4610 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); |
4611 | I915_WRITE(GEN6_RP_CONTROL, 0); | ||
4604 | } | 4612 | } |
4605 | 4613 | ||
4606 | static void cherryview_disable_rps(struct drm_device *dev) | 4614 | static void cherryview_disable_rps(struct drm_device *dev) |
@@ -4804,6 +4812,16 @@ static void gen9_enable_rps(struct drm_device *dev) | |||
4804 | 4812 | ||
4805 | /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ | 4813 | /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ |
4806 | if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { | 4814 | if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { |
4815 | /* | ||
4816 | * BIOS could leave the Hw Turbo enabled, so we need to explicitly | ||
4817 | * clear out the Control register just to avoid inconsistency | ||
4818 | * with the debugfs interface, which will show Turbo as enabled | ||
4819 | * only, and that is not expected by the user after adding the | ||
4820 | * WaGsvDisableTurbo. Apart from this there is no problem even | ||
4821 | * if the Turbo is left enabled in the Control register, as the | ||
4822 | * Up/Down interrupts would remain masked. | ||
4823 | */ | ||
4824 | gen9_disable_rps(dev); | ||
4807 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 4825 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
4808 | return; | 4826 | return; |
4809 | } | 4827 | } |
@@ -4997,7 +5015,8 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
4997 | I915_WRITE(GEN6_RC_STATE, 0); | 5015 | I915_WRITE(GEN6_RC_STATE, 0); |
4998 | 5016 | ||
4999 | /* Clear the DBG now so we don't confuse earlier errors */ | 5017 | /* Clear the DBG now so we don't confuse earlier errors */ |
5000 | if ((gtfifodbg = I915_READ(GTFIFODBG))) { | 5018 | gtfifodbg = I915_READ(GTFIFODBG); |
5019 | if (gtfifodbg) { | ||
5001 | DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); | 5020 | DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); |
5002 | I915_WRITE(GTFIFODBG, gtfifodbg); | 5021 | I915_WRITE(GTFIFODBG, gtfifodbg); |
5003 | } | 5022 | } |
@@ -5528,7 +5547,8 @@ static void cherryview_enable_rps(struct drm_device *dev) | |||
5528 | 5547 | ||
5529 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 5548 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
5530 | 5549 | ||
5531 | gtfifodbg = I915_READ(GTFIFODBG); | 5550 | gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | |
5551 | GT_FIFO_FREE_ENTRIES_CHV); | ||
5532 | if (gtfifodbg) { | 5552 | if (gtfifodbg) { |
5533 | DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", | 5553 | DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", |
5534 | gtfifodbg); | 5554 | gtfifodbg); |
@@ -5627,7 +5647,8 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
5627 | 5647 | ||
5628 | valleyview_check_pctx(dev_priv); | 5648 | valleyview_check_pctx(dev_priv); |
5629 | 5649 | ||
5630 | if ((gtfifodbg = I915_READ(GTFIFODBG))) { | 5650 | gtfifodbg = I915_READ(GTFIFODBG); |
5651 | if (gtfifodbg) { | ||
5631 | DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", | 5652 | DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", |
5632 | gtfifodbg); | 5653 | gtfifodbg); |
5633 | I915_WRITE(GTFIFODBG, gtfifodbg); | 5654 | I915_WRITE(GTFIFODBG, gtfifodbg); |
@@ -6265,9 +6286,10 @@ void intel_disable_gt_powersave(struct drm_device *dev) | |||
6265 | intel_suspend_gt_powersave(dev); | 6286 | intel_suspend_gt_powersave(dev); |
6266 | 6287 | ||
6267 | mutex_lock(&dev_priv->rps.hw_lock); | 6288 | mutex_lock(&dev_priv->rps.hw_lock); |
6268 | if (INTEL_INFO(dev)->gen >= 9) | 6289 | if (INTEL_INFO(dev)->gen >= 9) { |
6290 | gen9_disable_rc6(dev); | ||
6269 | gen9_disable_rps(dev); | 6291 | gen9_disable_rps(dev); |
6270 | else if (IS_CHERRYVIEW(dev)) | 6292 | } else if (IS_CHERRYVIEW(dev)) |
6271 | cherryview_disable_rps(dev); | 6293 | cherryview_disable_rps(dev); |
6272 | else if (IS_VALLEYVIEW(dev)) | 6294 | else if (IS_VALLEYVIEW(dev)) |
6273 | valleyview_disable_rps(dev); | 6295 | valleyview_disable_rps(dev); |
@@ -6882,23 +6904,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
6882 | gen6_check_mch_setup(dev); | 6904 | gen6_check_mch_setup(dev); |
6883 | } | 6905 | } |
6884 | 6906 | ||
6885 | static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) | ||
6886 | { | ||
6887 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); | ||
6888 | |||
6889 | /* | ||
6890 | * Disable trickle feed and enable pnd deadline calculation | ||
6891 | */ | ||
6892 | I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); | ||
6893 | I915_WRITE(CBR1_VLV, 0); | ||
6894 | } | ||
6895 | |||
6896 | static void valleyview_init_clock_gating(struct drm_device *dev) | 6907 | static void valleyview_init_clock_gating(struct drm_device *dev) |
6897 | { | 6908 | { |
6898 | struct drm_i915_private *dev_priv = dev->dev_private; | 6909 | struct drm_i915_private *dev_priv = dev->dev_private; |
6899 | 6910 | ||
6900 | vlv_init_display_clock_gating(dev_priv); | ||
6901 | |||
6902 | /* WaDisableEarlyCull:vlv */ | 6911 | /* WaDisableEarlyCull:vlv */ |
6903 | I915_WRITE(_3D_CHICKEN3, | 6912 | I915_WRITE(_3D_CHICKEN3, |
6904 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); | 6913 | _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); |
@@ -6981,8 +6990,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev) | |||
6981 | { | 6990 | { |
6982 | struct drm_i915_private *dev_priv = dev->dev_private; | 6991 | struct drm_i915_private *dev_priv = dev->dev_private; |
6983 | 6992 | ||
6984 | vlv_init_display_clock_gating(dev_priv); | ||
6985 | |||
6986 | /* WaVSRefCountFullforceMissDisable:chv */ | 6993 | /* WaVSRefCountFullforceMissDisable:chv */ |
6987 | /* WaDSRefCountFullforceMissDisable:chv */ | 6994 | /* WaDSRefCountFullforceMissDisable:chv */ |
6988 | I915_WRITE(GEN7_FF_THREAD_MODE, | 6995 | I915_WRITE(GEN7_FF_THREAD_MODE, |
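One pattern from the intel_pm.c changes deserves a closer look: on CHV the GT FIFO debug register mixes real error flags with free-entry status fields, so cherryview_enable_rps() now masks the status bits off before deciding whether a stale error needs to be reported and cleared. Below is a generic sketch of that read-mask-check sequence; the bit layout and register value are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Invented layout: the upper bits are free-entry status, not errors. */
#define FIFO_FREE_ENTRIES_MASK 0xffffff00u

/* Stand-in for I915_READ(GTFIFODBG): status bits set plus one stale error bit. */
static uint32_t read_fifodbg(void)
{
    return 0x12345602u;
}

int main(void)
{
    /* mask off the status-only fields before treating the rest as an error */
    uint32_t gtfifodbg = read_fifodbg() & ~FIFO_FREE_ENTRIES_MASK;

    if (gtfifodbg) {
        printf("GT fifo had a previous error %x\n", gtfifodbg);
        /* the driver would now write the value back to clear it */
    }
    return 0;
}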
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 41b604e69db7..245386e20c52 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -959,9 +959,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) | |||
959 | } | 959 | } |
960 | 960 | ||
961 | /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ | 961 | /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ |
962 | if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev)) | 962 | /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */ |
963 | WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, | 963 | WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, |
964 | GEN9_ENABLE_YV12_BUGFIX); | 964 | GEN9_ENABLE_YV12_BUGFIX | |
965 | GEN9_ENABLE_GPGPU_PREEMPTION); | ||
965 | 966 | ||
966 | /* Wa4x4STCOptimizationDisable:skl,bxt */ | 967 | /* Wa4x4STCOptimizationDisable:skl,bxt */ |
967 | /* WaDisablePartialResolveInVc:skl,bxt */ | 968 | /* WaDisablePartialResolveInVc:skl,bxt */ |
@@ -980,7 +981,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) | |||
980 | 981 | ||
981 | /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ | 982 | /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ |
982 | tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; | 983 | tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; |
983 | if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) || | 984 | if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || |
984 | IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) | 985 | IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) |
985 | tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; | 986 | tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; |
986 | WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); | 987 | WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); |
@@ -1097,7 +1098,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) | |||
1097 | WA_SET_BIT_MASKED(HIZ_CHICKEN, | 1098 | WA_SET_BIT_MASKED(HIZ_CHICKEN, |
1098 | BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); | 1099 | BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); |
1099 | 1100 | ||
1100 | if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) { | 1101 | /* This is tied to WaForceContextSaveRestoreNonCoherent */ |
1102 | if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { | ||
1101 | /* | 1103 | /* |
1102 | * Use Force Non-Coherent whenever executing a 3D context. This | 1104 | * Use Force Non-Coherent whenever executing a 3D context. This |
1103 | * is a workaround for a possible hang in the unlikely event | 1105 | * is a workaround for a possible hang in the unlikely event |
@@ -2086,6 +2088,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) | |||
2086 | i915_gem_object_unpin_map(ringbuf->obj); | 2088 | i915_gem_object_unpin_map(ringbuf->obj); |
2087 | else | 2089 | else |
2088 | iounmap(ringbuf->virtual_start); | 2090 | iounmap(ringbuf->virtual_start); |
2091 | ringbuf->virtual_start = NULL; | ||
2089 | ringbuf->vma = NULL; | 2092 | ringbuf->vma = NULL; |
2090 | i915_gem_object_ggtt_unpin(ringbuf->obj); | 2093 | i915_gem_object_ggtt_unpin(ringbuf->obj); |
2091 | } | 2094 | } |
@@ -2096,10 +2099,13 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, | |||
2096 | struct drm_i915_private *dev_priv = to_i915(dev); | 2099 | struct drm_i915_private *dev_priv = to_i915(dev); |
2097 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 2100 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
2098 | struct drm_i915_gem_object *obj = ringbuf->obj; | 2101 | struct drm_i915_gem_object *obj = ringbuf->obj; |
2102 | /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ | ||
2103 | unsigned flags = PIN_OFFSET_BIAS | 4096; | ||
2104 | void *addr; | ||
2099 | int ret; | 2105 | int ret; |
2100 | 2106 | ||
2101 | if (HAS_LLC(dev_priv) && !obj->stolen) { | 2107 | if (HAS_LLC(dev_priv) && !obj->stolen) { |
2102 | ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0); | 2108 | ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags); |
2103 | if (ret) | 2109 | if (ret) |
2104 | return ret; | 2110 | return ret; |
2105 | 2111 | ||
@@ -2107,13 +2113,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, | |||
2107 | if (ret) | 2113 | if (ret) |
2108 | goto err_unpin; | 2114 | goto err_unpin; |
2109 | 2115 | ||
2110 | ringbuf->virtual_start = i915_gem_object_pin_map(obj); | 2116 | addr = i915_gem_object_pin_map(obj); |
2111 | if (ringbuf->virtual_start == NULL) { | 2117 | if (IS_ERR(addr)) { |
2112 | ret = -ENOMEM; | 2118 | ret = PTR_ERR(addr); |
2113 | goto err_unpin; | 2119 | goto err_unpin; |
2114 | } | 2120 | } |
2115 | } else { | 2121 | } else { |
2116 | ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); | 2122 | ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, |
2123 | flags | PIN_MAPPABLE); | ||
2117 | if (ret) | 2124 | if (ret) |
2118 | return ret; | 2125 | return ret; |
2119 | 2126 | ||
@@ -2124,14 +2131,15 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, | |||
2124 | /* Access through the GTT requires the device to be awake. */ | 2131 | /* Access through the GTT requires the device to be awake. */ |
2125 | assert_rpm_wakelock_held(dev_priv); | 2132 | assert_rpm_wakelock_held(dev_priv); |
2126 | 2133 | ||
2127 | ringbuf->virtual_start = ioremap_wc(ggtt->mappable_base + | 2134 | addr = ioremap_wc(ggtt->mappable_base + |
2128 | i915_gem_obj_ggtt_offset(obj), ringbuf->size); | 2135 | i915_gem_obj_ggtt_offset(obj), ringbuf->size); |
2129 | if (ringbuf->virtual_start == NULL) { | 2136 | if (addr == NULL) { |
2130 | ret = -ENOMEM; | 2137 | ret = -ENOMEM; |
2131 | goto err_unpin; | 2138 | goto err_unpin; |
2132 | } | 2139 | } |
2133 | } | 2140 | } |
2134 | 2141 | ||
2142 | ringbuf->virtual_start = addr; | ||
2135 | ringbuf->vma = i915_gem_obj_to_ggtt(obj); | 2143 | ringbuf->vma = i915_gem_obj_to_ggtt(obj); |
2136 | return 0; | 2144 | return 0; |
2137 | 2145 | ||
@@ -2363,8 +2371,7 @@ int intel_engine_idle(struct intel_engine_cs *engine) | |||
2363 | 2371 | ||
2364 | /* Make sure we do not trigger any retires */ | 2372 | /* Make sure we do not trigger any retires */ |
2365 | return __i915_wait_request(req, | 2373 | return __i915_wait_request(req, |
2366 | atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter), | 2374 | req->i915->mm.interruptible, |
2367 | to_i915(engine->dev)->mm.interruptible, | ||
2368 | NULL, NULL); | 2375 | NULL, NULL); |
2369 | } | 2376 | } |
2370 | 2377 | ||
@@ -2486,19 +2493,9 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) | |||
2486 | int intel_ring_begin(struct drm_i915_gem_request *req, | 2493 | int intel_ring_begin(struct drm_i915_gem_request *req, |
2487 | int num_dwords) | 2494 | int num_dwords) |
2488 | { | 2495 | { |
2489 | struct intel_engine_cs *engine; | 2496 | struct intel_engine_cs *engine = req->engine; |
2490 | struct drm_i915_private *dev_priv; | ||
2491 | int ret; | 2497 | int ret; |
2492 | 2498 | ||
2493 | WARN_ON(req == NULL); | ||
2494 | engine = req->engine; | ||
2495 | dev_priv = req->i915; | ||
2496 | |||
2497 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, | ||
2498 | dev_priv->mm.interruptible); | ||
2499 | if (ret) | ||
2500 | return ret; | ||
2501 | |||
2502 | ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t)); | 2499 | ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t)); |
2503 | if (ret) | 2500 | if (ret) |
2504 | return ret; | 2501 | return ret; |
@@ -3189,7 +3186,7 @@ intel_stop_engine(struct intel_engine_cs *engine) | |||
3189 | return; | 3186 | return; |
3190 | 3187 | ||
3191 | ret = intel_engine_idle(engine); | 3188 | ret = intel_engine_idle(engine); |
3192 | if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error)) | 3189 | if (ret) |
3193 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | 3190 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", |
3194 | engine->name, ret); | 3191 | engine->name, ret); |
3195 | 3192 | ||
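The ringbuffer pinning rework follows the kernel's ERR_PTR convention: i915_gem_object_pin_map() now returns an encoded error pointer instead of NULL, so the caller tests it with IS_ERR(), propagates PTR_ERR(), and only publishes the address to ringbuf->virtual_start once either mapping path has succeeded. Here is a self-contained user-space sketch of that idiom; the ERR_PTR helpers are re-implemented locally and pin_map_object() is a stand-in, not the real API.

#include <errno.h>
#include <stdio.h>

/* Minimal user-space copies of the kernel's ERR_PTR helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for i915_gem_object_pin_map(); here it always fails with -ENOMEM. */
static void *pin_map_object(void)
{
    return ERR_PTR(-ENOMEM);
}

int main(void)
{
    void *addr = pin_map_object();

    if (IS_ERR(addr)) {
        /* propagate the encoded errno instead of assuming -ENOMEM */
        printf("pin_map failed: %ld\n", PTR_ERR(addr));
        return 1;
    }

    /* only now would the address be published, e.g. ringbuf->virtual_start = addr */
    return 0;
}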
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 78dc46864a10..2ade194bbea9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -270,6 +270,7 @@ struct intel_engine_cs { | |||
270 | spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ | 270 | spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ |
271 | struct list_head execlist_queue; | 271 | struct list_head execlist_queue; |
272 | struct list_head execlist_retired_req_list; | 272 | struct list_head execlist_retired_req_list; |
273 | unsigned int fw_domains; | ||
273 | unsigned int next_context_status_buffer; | 274 | unsigned int next_context_status_buffer; |
274 | unsigned int idle_lite_restore_wa; | 275 | unsigned int idle_lite_restore_wa; |
275 | bool disable_lite_restore_wa; | 276 | bool disable_lite_restore_wa; |
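The new fw_domains field gives each engine a cached bitmask of the forcewake domains it needs, part of the forcewake tuning mentioned in the merge summary, so code touching that engine's registers can take just those domains rather than FORCEWAKE_ALL. A toy sketch of keeping and using such a per-engine domain mask follows; the domain names and fw_get/fw_put helpers are illustrative, not the driver's actual enum or API.

#include <stdio.h>

/* Illustrative forcewake domain bits, not the driver's enum. */
enum fw_domain {
    FW_RENDER  = 1 << 0,
    FW_BLITTER = 1 << 1,
    FW_MEDIA   = 1 << 2,
};

struct engine {
    const char *name;
    unsigned int fw_domains;  /* bitmask of domains this engine needs */
};

static void fw_get(unsigned int domains) { printf("forcewake get 0x%x\n", domains); }
static void fw_put(unsigned int domains) { printf("forcewake put 0x%x\n", domains); }

int main(void)
{
    struct engine rcs = { .name = "render", .fw_domains = FW_RENDER | FW_BLITTER };

    /* take exactly the domains this engine's registers need, then release them */
    fw_get(rcs.fw_domains);
    printf("submit on %s\n", rcs.name);
    fw_put(rcs.fw_domains);
    return 0;
}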
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 80e8bd4b43b5..7fb1da4e7fc3 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -397,11 +397,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, | |||
397 | BIT(POWER_DOMAIN_MODESET) | \ | 397 | BIT(POWER_DOMAIN_MODESET) | \ |
398 | BIT(POWER_DOMAIN_AUX_A) | \ | 398 | BIT(POWER_DOMAIN_AUX_A) | \ |
399 | BIT(POWER_DOMAIN_INIT)) | 399 | BIT(POWER_DOMAIN_INIT)) |
400 | #define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \ | ||
401 | (POWER_DOMAIN_MASK & ~( \ | ||
402 | SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ | ||
403 | SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) | \ | ||
404 | BIT(POWER_DOMAIN_INIT)) | ||
405 | 400 | ||
406 | #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ | 401 | #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ |
407 | BIT(POWER_DOMAIN_TRANSCODER_A) | \ | 402 | BIT(POWER_DOMAIN_TRANSCODER_A) | \ |
@@ -419,39 +414,21 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, | |||
419 | BIT(POWER_DOMAIN_VGA) | \ | 414 | BIT(POWER_DOMAIN_VGA) | \ |
420 | BIT(POWER_DOMAIN_GMBUS) | \ | 415 | BIT(POWER_DOMAIN_GMBUS) | \ |
421 | BIT(POWER_DOMAIN_INIT)) | 416 | BIT(POWER_DOMAIN_INIT)) |
422 | #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \ | ||
423 | BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ | ||
424 | BIT(POWER_DOMAIN_PIPE_A) | \ | ||
425 | BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ | ||
426 | BIT(POWER_DOMAIN_TRANSCODER_DSI_A) | \ | ||
427 | BIT(POWER_DOMAIN_TRANSCODER_DSI_C) | \ | ||
428 | BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ | ||
429 | BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ | ||
430 | BIT(POWER_DOMAIN_PORT_DSI) | \ | ||
431 | BIT(POWER_DOMAIN_AUX_A) | \ | ||
432 | BIT(POWER_DOMAIN_PLLS) | \ | ||
433 | BIT(POWER_DOMAIN_INIT)) | ||
434 | #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ | 417 | #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ |
435 | BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ | 418 | BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ |
436 | BIT(POWER_DOMAIN_MODESET) | \ | 419 | BIT(POWER_DOMAIN_MODESET) | \ |
437 | BIT(POWER_DOMAIN_AUX_A) | \ | 420 | BIT(POWER_DOMAIN_AUX_A) | \ |
438 | BIT(POWER_DOMAIN_INIT)) | 421 | BIT(POWER_DOMAIN_INIT)) |
439 | #define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \ | ||
440 | (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \ | ||
441 | BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \ | ||
442 | BIT(POWER_DOMAIN_INIT)) | ||
443 | 422 | ||
444 | static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) | 423 | static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) |
445 | { | 424 | { |
446 | struct drm_device *dev = dev_priv->dev; | 425 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), |
447 | 426 | "DC9 already programmed to be enabled.\n"); | |
448 | WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n"); | 427 | WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, |
449 | WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), | 428 | "DC5 still not disabled to enable DC9.\n"); |
450 | "DC9 already programmed to be enabled.\n"); | 429 | WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n"); |
451 | WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, | 430 | WARN_ONCE(intel_irqs_enabled(dev_priv), |
452 | "DC5 still not disabled to enable DC9.\n"); | 431 | "Interrupts not disabled yet.\n"); |
453 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n"); | ||
454 | WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); | ||
455 | 432 | ||
456 | /* | 433 | /* |
457 | * TODO: check for the following to verify the conditions to enter DC9 | 434 | * TODO: check for the following to verify the conditions to enter DC9 |
@@ -464,9 +441,10 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) | |||
464 | 441 | ||
465 | static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) | 442 | static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) |
466 | { | 443 | { |
467 | WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); | 444 | WARN_ONCE(intel_irqs_enabled(dev_priv), |
468 | WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, | 445 | "Interrupts not disabled yet.\n"); |
469 | "DC5 still not disabled.\n"); | 446 | WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, |
447 | "DC5 still not disabled.\n"); | ||
470 | 448 | ||
471 | /* | 449 | /* |
472 | * TODO: check for the following to verify DC9 state was indeed | 450 | * TODO: check for the following to verify DC9 state was indeed |
@@ -514,10 +492,9 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv, | |||
514 | state, rewrites); | 492 | state, rewrites); |
515 | } | 493 | } |
516 | 494 | ||
517 | static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) | 495 | static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) |
518 | { | 496 | { |
519 | uint32_t val; | 497 | u32 mask; |
520 | uint32_t mask; | ||
521 | 498 | ||
522 | mask = DC_STATE_EN_UPTO_DC5; | 499 | mask = DC_STATE_EN_UPTO_DC5; |
523 | if (IS_BROXTON(dev_priv)) | 500 | if (IS_BROXTON(dev_priv)) |
@@ -525,10 +502,30 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) | |||
525 | else | 502 | else |
526 | mask |= DC_STATE_EN_UPTO_DC6; | 503 | mask |= DC_STATE_EN_UPTO_DC6; |
527 | 504 | ||
505 | return mask; | ||
506 | } | ||
507 | |||
508 | void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) | ||
509 | { | ||
510 | u32 val; | ||
511 | |||
512 | val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv); | ||
513 | |||
514 | DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n", | ||
515 | dev_priv->csr.dc_state, val); | ||
516 | dev_priv->csr.dc_state = val; | ||
517 | } | ||
518 | |||
519 | static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) | ||
520 | { | ||
521 | uint32_t val; | ||
522 | uint32_t mask; | ||
523 | |||
528 | if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) | 524 | if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) |
529 | state &= dev_priv->csr.allowed_dc_mask; | 525 | state &= dev_priv->csr.allowed_dc_mask; |
530 | 526 | ||
531 | val = I915_READ(DC_STATE_EN); | 527 | val = I915_READ(DC_STATE_EN); |
528 | mask = gen9_dc_mask(dev_priv); | ||
532 | DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", | 529 | DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", |
533 | val & mask, state); | 530 | val & mask, state); |
534 | 531 | ||
@@ -573,13 +570,9 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv) | |||
573 | 570 | ||
574 | static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) | 571 | static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) |
575 | { | 572 | { |
576 | struct drm_device *dev = dev_priv->dev; | ||
577 | bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, | 573 | bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, |
578 | SKL_DISP_PW_2); | 574 | SKL_DISP_PW_2); |
579 | 575 | ||
580 | WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev), | ||
581 | "Platform doesn't support DC5.\n"); | ||
582 | WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); | ||
583 | WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); | 576 | WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); |
584 | 577 | ||
585 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), | 578 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), |
@@ -589,7 +582,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) | |||
589 | assert_csr_loaded(dev_priv); | 582 | assert_csr_loaded(dev_priv); |
590 | } | 583 | } |
591 | 584 | ||
592 | static void gen9_enable_dc5(struct drm_i915_private *dev_priv) | 585 | void gen9_enable_dc5(struct drm_i915_private *dev_priv) |
593 | { | 586 | { |
594 | assert_can_enable_dc5(dev_priv); | 587 | assert_can_enable_dc5(dev_priv); |
595 | 588 | ||
@@ -600,11 +593,6 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv) | |||
600 | 593 | ||
601 | static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) | 594 | static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) |
602 | { | 595 | { |
603 | struct drm_device *dev = dev_priv->dev; | ||
604 | |||
605 | WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev), | ||
606 | "Platform doesn't support DC6.\n"); | ||
607 | WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); | ||
608 | WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, | 596 | WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, |
609 | "Backlight is not disabled.\n"); | 597 | "Backlight is not disabled.\n"); |
610 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), | 598 | WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), |
@@ -630,6 +618,45 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv) | |||
630 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 618 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
631 | } | 619 | } |
632 | 620 | ||
621 | static void | ||
622 | gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv, | ||
623 | struct i915_power_well *power_well) | ||
624 | { | ||
625 | enum skl_disp_power_wells power_well_id = power_well->data; | ||
626 | u32 val; | ||
627 | u32 mask; | ||
628 | |||
629 | mask = SKL_POWER_WELL_REQ(power_well_id); | ||
630 | |||
631 | val = I915_READ(HSW_PWR_WELL_KVMR); | ||
632 | if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n", | ||
633 | power_well->name)) | ||
634 | I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask); | ||
635 | |||
636 | val = I915_READ(HSW_PWR_WELL_BIOS); | ||
637 | val |= I915_READ(HSW_PWR_WELL_DEBUG); | ||
638 | |||
639 | if (!(val & mask)) | ||
640 | return; | ||
641 | |||
642 | /* | ||
643 | * DMC is known to force on the request bits for power well 1 on SKL | ||
644 | * and BXT and the misc IO power well on SKL but we don't expect any | ||
645 | * other request bits to be set, so WARN for those. | ||
646 | */ | ||
647 | if (power_well_id == SKL_DISP_PW_1 || | ||
648 | ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && | ||
649 | power_well_id == SKL_DISP_PW_MISC_IO)) | ||
650 | DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on " | ||
651 | "by DMC\n", power_well->name); | ||
652 | else | ||
653 | WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n", | ||
654 | power_well->name); | ||
655 | |||
656 | I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask); | ||
657 | I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask); | ||
658 | } | ||
659 | |||
633 | static void skl_set_power_well(struct drm_i915_private *dev_priv, | 660 | static void skl_set_power_well(struct drm_i915_private *dev_priv, |
634 | struct i915_power_well *power_well, bool enable) | 661 | struct i915_power_well *power_well, bool enable) |
635 | { | 662 | { |
@@ -684,10 +711,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, | |||
684 | 711 | ||
685 | if (!is_enabled) { | 712 | if (!is_enabled) { |
686 | DRM_DEBUG_KMS("Enabling %s\n", power_well->name); | 713 | DRM_DEBUG_KMS("Enabling %s\n", power_well->name); |
687 | if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & | ||
688 | state_mask), 1)) | ||
689 | DRM_ERROR("%s enable timeout\n", | ||
690 | power_well->name); | ||
691 | check_fuse_status = true; | 714 | check_fuse_status = true; |
692 | } | 715 | } |
693 | } else { | 716 | } else { |
@@ -696,8 +719,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, | |||
696 | POSTING_READ(HSW_PWR_WELL_DRIVER); | 719 | POSTING_READ(HSW_PWR_WELL_DRIVER); |
697 | DRM_DEBUG_KMS("Disabling %s\n", power_well->name); | 720 | DRM_DEBUG_KMS("Disabling %s\n", power_well->name); |
698 | } | 721 | } |
722 | |||
723 | if (IS_GEN9(dev_priv)) | ||
724 | gen9_sanitize_power_well_requests(dev_priv, power_well); | ||
699 | } | 725 | } |
700 | 726 | ||
727 | if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable, | ||
728 | 1)) | ||
729 | DRM_ERROR("%s %s timeout\n", | ||
730 | power_well->name, enable ? "enable" : "disable"); | ||
731 | |||
701 | if (check_fuse_status) { | 732 | if (check_fuse_status) { |
702 | if (power_well->data == SKL_DISP_PW_1) { | 733 | if (power_well->data == SKL_DISP_PW_1) { |
703 | if (wait_for((I915_READ(SKL_FUSE_STATUS) & | 734 | if (wait_for((I915_READ(SKL_FUSE_STATUS) & |
@@ -779,11 +810,19 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, | |||
779 | struct i915_power_well *power_well) | 810 | struct i915_power_well *power_well) |
780 | { | 811 | { |
781 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 812 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
813 | |||
814 | if (IS_BROXTON(dev_priv)) { | ||
815 | broxton_cdclk_verify_state(dev_priv); | ||
816 | broxton_ddi_phy_verify_state(dev_priv); | ||
817 | } | ||
782 | } | 818 | } |
783 | 819 | ||
784 | static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, | 820 | static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, |
785 | struct i915_power_well *power_well) | 821 | struct i915_power_well *power_well) |
786 | { | 822 | { |
823 | if (!dev_priv->csr.dmc_payload) | ||
824 | return; | ||
825 | |||
787 | if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) | 826 | if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) |
788 | skl_enable_dc6(dev_priv); | 827 | skl_enable_dc6(dev_priv); |
789 | else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) | 828 | else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) |
@@ -900,6 +939,17 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, | |||
900 | return enabled; | 939 | return enabled; |
901 | } | 940 | } |
902 | 941 | ||
942 | static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) | ||
943 | { | ||
944 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); | ||
945 | |||
946 | /* | ||
947 | * Disable trickle feed and enable pnd deadline calculation | ||
948 | */ | ||
949 | I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); | ||
950 | I915_WRITE(CBR1_VLV, 0); | ||
951 | } | ||
952 | |||
903 | static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) | 953 | static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) |
904 | { | 954 | { |
905 | enum pipe pipe; | 955 | enum pipe pipe; |
@@ -922,6 +972,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) | |||
922 | I915_WRITE(DPLL(pipe), val); | 972 | I915_WRITE(DPLL(pipe), val); |
923 | } | 973 | } |
924 | 974 | ||
975 | vlv_init_display_clock_gating(dev_priv); | ||
976 | |||
925 | spin_lock_irq(&dev_priv->irq_lock); | 977 | spin_lock_irq(&dev_priv->irq_lock); |
926 | valleyview_enable_display_irqs(dev_priv); | 978 | valleyview_enable_display_irqs(dev_priv); |
927 | spin_unlock_irq(&dev_priv->irq_lock); | 979 | spin_unlock_irq(&dev_priv->irq_lock); |
@@ -1560,34 +1612,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
1560 | intel_runtime_pm_put(dev_priv); | 1612 | intel_runtime_pm_put(dev_priv); |
1561 | } | 1613 | } |
1562 | 1614 | ||
1563 | #define HSW_ALWAYS_ON_POWER_DOMAINS ( \ | 1615 | #define HSW_DISPLAY_POWER_DOMAINS ( \ |
1564 | BIT(POWER_DOMAIN_PIPE_A) | \ | 1616 | BIT(POWER_DOMAIN_PIPE_B) | \ |
1565 | BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ | 1617 | BIT(POWER_DOMAIN_PIPE_C) | \ |
1566 | BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ | 1618 | BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ |
1619 | BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ | ||
1620 | BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ | ||
1621 | BIT(POWER_DOMAIN_TRANSCODER_A) | \ | ||
1622 | BIT(POWER_DOMAIN_TRANSCODER_B) | \ | ||
1623 | BIT(POWER_DOMAIN_TRANSCODER_C) | \ | ||
1567 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | 1624 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
1568 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | 1625 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
1569 | BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ | 1626 | BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ |
1570 | BIT(POWER_DOMAIN_PORT_CRT) | \ | 1627 | BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ |
1571 | BIT(POWER_DOMAIN_PLLS) | \ | 1628 | BIT(POWER_DOMAIN_VGA) | \ |
1572 | BIT(POWER_DOMAIN_AUX_A) | \ | 1629 | BIT(POWER_DOMAIN_AUDIO) | \ |
1573 | BIT(POWER_DOMAIN_AUX_B) | \ | ||
1574 | BIT(POWER_DOMAIN_AUX_C) | \ | ||
1575 | BIT(POWER_DOMAIN_AUX_D) | \ | ||
1576 | BIT(POWER_DOMAIN_GMBUS) | \ | ||
1577 | BIT(POWER_DOMAIN_INIT)) | ||
1578 | #define HSW_DISPLAY_POWER_DOMAINS ( \ | ||
1579 | (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ | ||
1580 | BIT(POWER_DOMAIN_INIT)) | 1630 | BIT(POWER_DOMAIN_INIT)) |
1581 | 1631 | ||
1582 | #define BDW_ALWAYS_ON_POWER_DOMAINS ( \ | 1632 | #define BDW_DISPLAY_POWER_DOMAINS ( \ |
1583 | HSW_ALWAYS_ON_POWER_DOMAINS | \ | 1633 | BIT(POWER_DOMAIN_PIPE_B) | \ |
1584 | BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER)) | 1634 | BIT(POWER_DOMAIN_PIPE_C) | \ |
1585 | #define BDW_DISPLAY_POWER_DOMAINS ( \ | 1635 | BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ |
1586 | (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \ | 1636 | BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ |
1637 | BIT(POWER_DOMAIN_TRANSCODER_A) | \ | ||
1638 | BIT(POWER_DOMAIN_TRANSCODER_B) | \ | ||
1639 | BIT(POWER_DOMAIN_TRANSCODER_C) | \ | ||
1640 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | ||
1641 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | ||
1642 | BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ | ||
1643 | BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ | ||
1644 | BIT(POWER_DOMAIN_VGA) | \ | ||
1645 | BIT(POWER_DOMAIN_AUDIO) | \ | ||
1587 | BIT(POWER_DOMAIN_INIT)) | 1646 | BIT(POWER_DOMAIN_INIT)) |
1588 | 1647 | ||
1589 | #define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT) | 1648 | #define VLV_DISPLAY_POWER_DOMAINS ( \ |
1590 | #define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK | 1649 | BIT(POWER_DOMAIN_PIPE_A) | \ |
1650 | BIT(POWER_DOMAIN_PIPE_B) | \ | ||
1651 | BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ | ||
1652 | BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ | ||
1653 | BIT(POWER_DOMAIN_TRANSCODER_A) | \ | ||
1654 | BIT(POWER_DOMAIN_TRANSCODER_B) | \ | ||
1655 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | ||
1656 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | ||
1657 | BIT(POWER_DOMAIN_PORT_DSI) | \ | ||
1658 | BIT(POWER_DOMAIN_PORT_CRT) | \ | ||
1659 | BIT(POWER_DOMAIN_VGA) | \ | ||
1660 | BIT(POWER_DOMAIN_AUDIO) | \ | ||
1661 | BIT(POWER_DOMAIN_AUX_B) | \ | ||
1662 | BIT(POWER_DOMAIN_AUX_C) | \ | ||
1663 | BIT(POWER_DOMAIN_GMBUS) | \ | ||
1664 | BIT(POWER_DOMAIN_INIT)) | ||
1591 | 1665 | ||
1592 | #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ | 1666 | #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ |
1593 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | 1667 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
@@ -1617,6 +1691,28 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
1617 | BIT(POWER_DOMAIN_AUX_C) | \ | 1691 | BIT(POWER_DOMAIN_AUX_C) | \ |
1618 | BIT(POWER_DOMAIN_INIT)) | 1692 | BIT(POWER_DOMAIN_INIT)) |
1619 | 1693 | ||
1694 | #define CHV_DISPLAY_POWER_DOMAINS ( \ | ||
1695 | BIT(POWER_DOMAIN_PIPE_A) | \ | ||
1696 | BIT(POWER_DOMAIN_PIPE_B) | \ | ||
1697 | BIT(POWER_DOMAIN_PIPE_C) | \ | ||
1698 | BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ | ||
1699 | BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ | ||
1700 | BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ | ||
1701 | BIT(POWER_DOMAIN_TRANSCODER_A) | \ | ||
1702 | BIT(POWER_DOMAIN_TRANSCODER_B) | \ | ||
1703 | BIT(POWER_DOMAIN_TRANSCODER_C) | \ | ||
1704 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | ||
1705 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | ||
1706 | BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \ | ||
1707 | BIT(POWER_DOMAIN_PORT_DSI) | \ | ||
1708 | BIT(POWER_DOMAIN_VGA) | \ | ||
1709 | BIT(POWER_DOMAIN_AUDIO) | \ | ||
1710 | BIT(POWER_DOMAIN_AUX_B) | \ | ||
1711 | BIT(POWER_DOMAIN_AUX_C) | \ | ||
1712 | BIT(POWER_DOMAIN_AUX_D) | \ | ||
1713 | BIT(POWER_DOMAIN_GMBUS) | \ | ||
1714 | BIT(POWER_DOMAIN_INIT)) | ||
1715 | |||
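The display power-domain sets above are now spelled out per platform instead of being derived as POWER_DOMAIN_MASK minus an always-on mask, and the always-on wells further down simply claim POWER_DOMAIN_MASK. A minimal sketch of how such a mask is consulted when a domain reference is taken; the helper below is illustrative, not the driver's actual get path:

        /* Illustrative only: a well must be enabled for a domain iff its
         * ->domains mask contains that domain's bit. With POWER_DOMAIN_MASK
         * on the always-on wells this holds for every domain. */
        static bool well_covers_domain(const struct i915_power_well *well,
                                       enum intel_display_power_domain domain)
        {
                return (well->domains & BIT(domain)) != 0;
        }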
1620 | #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ | 1716 | #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ |
1621 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ | 1717 | BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ |
1622 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ | 1718 | BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ |
@@ -1684,7 +1780,7 @@ static struct i915_power_well hsw_power_wells[] = { | |||
1684 | { | 1780 | { |
1685 | .name = "always-on", | 1781 | .name = "always-on", |
1686 | .always_on = 1, | 1782 | .always_on = 1, |
1687 | .domains = HSW_ALWAYS_ON_POWER_DOMAINS, | 1783 | .domains = POWER_DOMAIN_MASK, |
1688 | .ops = &i9xx_always_on_power_well_ops, | 1784 | .ops = &i9xx_always_on_power_well_ops, |
1689 | }, | 1785 | }, |
1690 | { | 1786 | { |
@@ -1698,7 +1794,7 @@ static struct i915_power_well bdw_power_wells[] = { | |||
1698 | { | 1794 | { |
1699 | .name = "always-on", | 1795 | .name = "always-on", |
1700 | .always_on = 1, | 1796 | .always_on = 1, |
1701 | .domains = BDW_ALWAYS_ON_POWER_DOMAINS, | 1797 | .domains = POWER_DOMAIN_MASK, |
1702 | .ops = &i9xx_always_on_power_well_ops, | 1798 | .ops = &i9xx_always_on_power_well_ops, |
1703 | }, | 1799 | }, |
1704 | { | 1800 | { |
@@ -1733,7 +1829,7 @@ static struct i915_power_well vlv_power_wells[] = { | |||
1733 | { | 1829 | { |
1734 | .name = "always-on", | 1830 | .name = "always-on", |
1735 | .always_on = 1, | 1831 | .always_on = 1, |
1736 | .domains = VLV_ALWAYS_ON_POWER_DOMAINS, | 1832 | .domains = POWER_DOMAIN_MASK, |
1737 | .ops = &i9xx_always_on_power_well_ops, | 1833 | .ops = &i9xx_always_on_power_well_ops, |
1738 | .data = PUNIT_POWER_WELL_ALWAYS_ON, | 1834 | .data = PUNIT_POWER_WELL_ALWAYS_ON, |
1739 | }, | 1835 | }, |
@@ -1791,7 +1887,7 @@ static struct i915_power_well chv_power_wells[] = { | |||
1791 | { | 1887 | { |
1792 | .name = "always-on", | 1888 | .name = "always-on", |
1793 | .always_on = 1, | 1889 | .always_on = 1, |
1794 | .domains = VLV_ALWAYS_ON_POWER_DOMAINS, | 1890 | .domains = POWER_DOMAIN_MASK, |
1795 | .ops = &i9xx_always_on_power_well_ops, | 1891 | .ops = &i9xx_always_on_power_well_ops, |
1796 | }, | 1892 | }, |
1797 | { | 1893 | { |
@@ -1801,7 +1897,7 @@ static struct i915_power_well chv_power_wells[] = { | |||
1801 | * power wells don't actually exist. Pipe A power well is | 1897 | * power wells don't actually exist. Pipe A power well is |
1802 | * required for any pipe to work. | 1898 | * required for any pipe to work. |
1803 | */ | 1899 | */ |
1804 | .domains = VLV_DISPLAY_POWER_DOMAINS, | 1900 | .domains = CHV_DISPLAY_POWER_DOMAINS, |
1805 | .data = PIPE_A, | 1901 | .data = PIPE_A, |
1806 | .ops = &chv_pipe_power_well_ops, | 1902 | .ops = &chv_pipe_power_well_ops, |
1807 | }, | 1903 | }, |
@@ -1835,7 +1931,7 @@ static struct i915_power_well skl_power_wells[] = { | |||
1835 | { | 1931 | { |
1836 | .name = "always-on", | 1932 | .name = "always-on", |
1837 | .always_on = 1, | 1933 | .always_on = 1, |
1838 | .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS, | 1934 | .domains = POWER_DOMAIN_MASK, |
1839 | .ops = &i9xx_always_on_power_well_ops, | 1935 | .ops = &i9xx_always_on_power_well_ops, |
1840 | .data = SKL_DISP_PW_ALWAYS_ON, | 1936 | .data = SKL_DISP_PW_ALWAYS_ON, |
1841 | }, | 1937 | }, |
@@ -1891,44 +1987,16 @@ static struct i915_power_well skl_power_wells[] = { | |||
1891 | }, | 1987 | }, |
1892 | }; | 1988 | }; |
1893 | 1989 | ||
1894 | void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv) | ||
1895 | { | ||
1896 | struct i915_power_well *well; | ||
1897 | |||
1898 | if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) | ||
1899 | return; | ||
1900 | |||
1901 | well = lookup_power_well(dev_priv, SKL_DISP_PW_1); | ||
1902 | intel_power_well_enable(dev_priv, well); | ||
1903 | |||
1904 | well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); | ||
1905 | intel_power_well_enable(dev_priv, well); | ||
1906 | } | ||
1907 | |||
1908 | void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv) | ||
1909 | { | ||
1910 | struct i915_power_well *well; | ||
1911 | |||
1912 | if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) | ||
1913 | return; | ||
1914 | |||
1915 | well = lookup_power_well(dev_priv, SKL_DISP_PW_1); | ||
1916 | intel_power_well_disable(dev_priv, well); | ||
1917 | |||
1918 | well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); | ||
1919 | intel_power_well_disable(dev_priv, well); | ||
1920 | } | ||
1921 | |||
1922 | static struct i915_power_well bxt_power_wells[] = { | 1990 | static struct i915_power_well bxt_power_wells[] = { |
1923 | { | 1991 | { |
1924 | .name = "always-on", | 1992 | .name = "always-on", |
1925 | .always_on = 1, | 1993 | .always_on = 1, |
1926 | .domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS, | 1994 | .domains = POWER_DOMAIN_MASK, |
1927 | .ops = &i9xx_always_on_power_well_ops, | 1995 | .ops = &i9xx_always_on_power_well_ops, |
1928 | }, | 1996 | }, |
1929 | { | 1997 | { |
1930 | .name = "power well 1", | 1998 | .name = "power well 1", |
1931 | .domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS, | 1999 | .domains = 0, |
1932 | .ops = &skl_power_well_ops, | 2000 | .ops = &skl_power_well_ops, |
1933 | .data = SKL_DISP_PW_1, | 2001 | .data = SKL_DISP_PW_1, |
1934 | }, | 2002 | }, |
@@ -1953,11 +2021,6 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, | |||
1953 | if (disable_power_well >= 0) | 2021 | if (disable_power_well >= 0) |
1954 | return !!disable_power_well; | 2022 | return !!disable_power_well; |
1955 | 2023 | ||
1956 | if (IS_BROXTON(dev_priv)) { | ||
1957 | DRM_DEBUG_KMS("Disabling display power well support\n"); | ||
1958 | return 0; | ||
1959 | } | ||
1960 | |||
1961 | return 1; | 2024 | return 1; |
1962 | } | 2025 | } |
1963 | 2026 | ||
@@ -2109,9 +2172,10 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) | |||
2109 | } | 2172 | } |
2110 | 2173 | ||
2111 | static void skl_display_core_init(struct drm_i915_private *dev_priv, | 2174 | static void skl_display_core_init(struct drm_i915_private *dev_priv, |
2112 | bool resume) | 2175 | bool resume) |
2113 | { | 2176 | { |
2114 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 2177 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
2178 | struct i915_power_well *well; | ||
2115 | uint32_t val; | 2179 | uint32_t val; |
2116 | 2180 | ||
2117 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 2181 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
@@ -2122,7 +2186,13 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, | |||
2122 | 2186 | ||
2123 | /* enable PG1 and Misc I/O */ | 2187 | /* enable PG1 and Misc I/O */ |
2124 | mutex_lock(&power_domains->lock); | 2188 | mutex_lock(&power_domains->lock); |
2125 | skl_pw1_misc_io_init(dev_priv); | 2189 | |
2190 | well = lookup_power_well(dev_priv, SKL_DISP_PW_1); | ||
2191 | intel_power_well_enable(dev_priv, well); | ||
2192 | |||
2193 | well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); | ||
2194 | intel_power_well_enable(dev_priv, well); | ||
2195 | |||
2126 | mutex_unlock(&power_domains->lock); | 2196 | mutex_unlock(&power_domains->lock); |
2127 | 2197 | ||
2128 | if (!resume) | 2198 | if (!resume) |
@@ -2137,6 +2207,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, | |||
2137 | static void skl_display_core_uninit(struct drm_i915_private *dev_priv) | 2207 | static void skl_display_core_uninit(struct drm_i915_private *dev_priv) |
2138 | { | 2208 | { |
2139 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 2209 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
2210 | struct i915_power_well *well; | ||
2140 | 2211 | ||
2141 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 2212 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
2142 | 2213 | ||
@@ -2144,8 +2215,73 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) | |||
2144 | 2215 | ||
2145 | /* The spec doesn't call for removing the reset handshake flag */ | 2216 | /* The spec doesn't call for removing the reset handshake flag */ |
2146 | /* disable PG1 and Misc I/O */ | 2217 | /* disable PG1 and Misc I/O */ |
2218 | |||
2147 | mutex_lock(&power_domains->lock); | 2219 | mutex_lock(&power_domains->lock); |
2148 | skl_pw1_misc_io_fini(dev_priv); | 2220 | |
2221 | well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); | ||
2222 | intel_power_well_disable(dev_priv, well); | ||
2223 | |||
2224 | well = lookup_power_well(dev_priv, SKL_DISP_PW_1); | ||
2225 | intel_power_well_disable(dev_priv, well); | ||
2226 | |||
2227 | mutex_unlock(&power_domains->lock); | ||
2228 | } | ||
2229 | |||
2230 | void bxt_display_core_init(struct drm_i915_private *dev_priv, | ||
2231 | bool resume) | ||
2232 | { | ||
2233 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
2234 | struct i915_power_well *well; | ||
2235 | uint32_t val; | ||
2236 | |||
2237 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | ||
2238 | |||
2239 | /* | ||
2240 | * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT | ||
2241 | * or else the reset will hang because there is no PCH to respond. | ||
2242 | * Move the handshake programming to initialization sequence. | ||
2243 | * Previously was left up to BIOS. | ||
2244 | */ | ||
2245 | val = I915_READ(HSW_NDE_RSTWRN_OPT); | ||
2246 | val &= ~RESET_PCH_HANDSHAKE_ENABLE; | ||
2247 | I915_WRITE(HSW_NDE_RSTWRN_OPT, val); | ||
2248 | |||
2249 | /* Enable PG1 */ | ||
2250 | mutex_lock(&power_domains->lock); | ||
2251 | |||
2252 | well = lookup_power_well(dev_priv, SKL_DISP_PW_1); | ||
2253 | intel_power_well_enable(dev_priv, well); | ||
2254 | |||
2255 | mutex_unlock(&power_domains->lock); | ||
2256 | |||
2257 | broxton_init_cdclk(dev_priv); | ||
2258 | broxton_ddi_phy_init(dev_priv); | ||
2259 | |||
2260 | broxton_cdclk_verify_state(dev_priv); | ||
2261 | broxton_ddi_phy_verify_state(dev_priv); | ||
2262 | |||
2263 | if (resume && dev_priv->csr.dmc_payload) | ||
2264 | intel_csr_load_program(dev_priv); | ||
2265 | } | ||
2266 | |||
2267 | void bxt_display_core_uninit(struct drm_i915_private *dev_priv) | ||
2268 | { | ||
2269 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | ||
2270 | struct i915_power_well *well; | ||
2271 | |||
2272 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | ||
2273 | |||
2274 | broxton_ddi_phy_uninit(dev_priv); | ||
2275 | broxton_uninit_cdclk(dev_priv); | ||
2276 | |||
2277 | /* The spec doesn't call for removing the reset handshake flag */ | ||
2278 | |||
2279 | /* Disable PG1 */ | ||
2280 | mutex_lock(&power_domains->lock); | ||
2281 | |||
2282 | well = lookup_power_well(dev_priv, SKL_DISP_PW_1); | ||
2283 | intel_power_well_disable(dev_priv, well); | ||
2284 | |||
2149 | mutex_unlock(&power_domains->lock); | 2285 | mutex_unlock(&power_domains->lock); |
2150 | } | 2286 | } |
2151 | 2287 | ||
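The new bxt_display_core_init()/bxt_display_core_uninit() pair mirrors the SKL sequence but drops the Misc I/O well and folds cdclk and DDI PHY bring-up into the same path. A sketch of how a resume/suspend style caller pairs them; the wrapper names are placeholders, the real call sites being the init_hw/suspend hunks below and, in this series, the runtime-PM paths:

        /* Sketch only: wrapper names are illustrative, not driver functions. */
        static void bxt_power_resume_sketch(struct drm_i915_private *dev_priv)
        {
                /* resume == true lets a cached DMC/CSR payload be reloaded */
                bxt_display_core_init(dev_priv, true);
        }

        static void bxt_power_suspend_sketch(struct drm_i915_private *dev_priv)
        {
                bxt_display_core_uninit(dev_priv);
        }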
@@ -2280,6 +2416,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) | |||
2280 | 2416 | ||
2281 | if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { | 2417 | if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { |
2282 | skl_display_core_init(dev_priv, resume); | 2418 | skl_display_core_init(dev_priv, resume); |
2419 | } else if (IS_BROXTON(dev)) { | ||
2420 | bxt_display_core_init(dev_priv, resume); | ||
2283 | } else if (IS_CHERRYVIEW(dev)) { | 2421 | } else if (IS_CHERRYVIEW(dev)) { |
2284 | mutex_lock(&power_domains->lock); | 2422 | mutex_lock(&power_domains->lock); |
2285 | chv_phy_control_init(dev_priv); | 2423 | chv_phy_control_init(dev_priv); |
@@ -2317,6 +2455,8 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) | |||
2317 | 2455 | ||
2318 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 2456 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
2319 | skl_display_core_uninit(dev_priv); | 2457 | skl_display_core_uninit(dev_priv); |
2458 | else if (IS_BROXTON(dev_priv)) | ||
2459 | bxt_display_core_uninit(dev_priv); | ||
2320 | } | 2460 | } |
2321 | 2461 | ||
2322 | /** | 2462 | /** |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index ac2ac07b505b..4f1dfe616856 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -60,7 +60,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d) | |||
60 | static inline void | 60 | static inline void |
61 | fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) | 61 | fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) |
62 | { | 62 | { |
63 | mod_timer_pinned(&d->timer, jiffies + 1); | 63 | d->wake_count++; |
64 | hrtimer_start_range_ns(&d->timer, | ||
65 | ktime_set(0, NSEC_PER_MSEC), | ||
66 | NSEC_PER_MSEC, | ||
67 | HRTIMER_MODE_REL); | ||
64 | } | 68 | } |
65 | 69 | ||
66 | static inline void | 70 | static inline void |
@@ -107,22 +111,22 @@ static void | |||
107 | fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) | 111 | fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) |
108 | { | 112 | { |
109 | struct intel_uncore_forcewake_domain *d; | 113 | struct intel_uncore_forcewake_domain *d; |
110 | enum forcewake_domain_id id; | ||
111 | 114 | ||
112 | for_each_fw_domain_mask(d, fw_domains, dev_priv, id) { | 115 | for_each_fw_domain_masked(d, fw_domains, dev_priv) { |
113 | fw_domain_wait_ack_clear(d); | 116 | fw_domain_wait_ack_clear(d); |
114 | fw_domain_get(d); | 117 | fw_domain_get(d); |
115 | fw_domain_wait_ack(d); | ||
116 | } | 118 | } |
119 | |||
120 | for_each_fw_domain_masked(d, fw_domains, dev_priv) | ||
121 | fw_domain_wait_ack(d); | ||
117 | } | 122 | } |
118 | 123 | ||
119 | static void | 124 | static void |
120 | fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) | 125 | fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) |
121 | { | 126 | { |
122 | struct intel_uncore_forcewake_domain *d; | 127 | struct intel_uncore_forcewake_domain *d; |
123 | enum forcewake_domain_id id; | ||
124 | 128 | ||
125 | for_each_fw_domain_mask(d, fw_domains, dev_priv, id) { | 129 | for_each_fw_domain_masked(d, fw_domains, dev_priv) { |
126 | fw_domain_put(d); | 130 | fw_domain_put(d); |
127 | fw_domain_posting_read(d); | 131 | fw_domain_posting_read(d); |
128 | } | 132 | } |
@@ -132,10 +136,9 @@ static void | |||
132 | fw_domains_posting_read(struct drm_i915_private *dev_priv) | 136 | fw_domains_posting_read(struct drm_i915_private *dev_priv) |
133 | { | 137 | { |
134 | struct intel_uncore_forcewake_domain *d; | 138 | struct intel_uncore_forcewake_domain *d; |
135 | enum forcewake_domain_id id; | ||
136 | 139 | ||
137 | /* No need to do for all, just do for first found */ | 140 | /* No need to do for all, just do for first found */ |
138 | for_each_fw_domain(d, dev_priv, id) { | 141 | for_each_fw_domain(d, dev_priv) { |
139 | fw_domain_posting_read(d); | 142 | fw_domain_posting_read(d); |
140 | break; | 143 | break; |
141 | } | 144 | } |
@@ -145,12 +148,11 @@ static void | |||
145 | fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) | 148 | fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) |
146 | { | 149 | { |
147 | struct intel_uncore_forcewake_domain *d; | 150 | struct intel_uncore_forcewake_domain *d; |
148 | enum forcewake_domain_id id; | ||
149 | 151 | ||
150 | if (dev_priv->uncore.fw_domains == 0) | 152 | if (dev_priv->uncore.fw_domains == 0) |
151 | return; | 153 | return; |
152 | 154 | ||
153 | for_each_fw_domain_mask(d, fw_domains, dev_priv, id) | 155 | for_each_fw_domain_masked(d, fw_domains, dev_priv) |
154 | fw_domain_reset(d); | 156 | fw_domain_reset(d); |
155 | 157 | ||
156 | fw_domains_posting_read(dev_priv); | 158 | fw_domains_posting_read(dev_priv); |
@@ -224,9 +226,11 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | |||
224 | return ret; | 226 | return ret; |
225 | } | 227 | } |
226 | 228 | ||
227 | static void intel_uncore_fw_release_timer(unsigned long arg) | 229 | static enum hrtimer_restart |
230 | intel_uncore_fw_release_timer(struct hrtimer *timer) | ||
228 | { | 231 | { |
229 | struct intel_uncore_forcewake_domain *domain = (void *)arg; | 232 | struct intel_uncore_forcewake_domain *domain = |
233 | container_of(timer, struct intel_uncore_forcewake_domain, timer); | ||
230 | unsigned long irqflags; | 234 | unsigned long irqflags; |
231 | 235 | ||
232 | assert_rpm_device_not_suspended(domain->i915); | 236 | assert_rpm_device_not_suspended(domain->i915); |
@@ -240,6 +244,8 @@ static void intel_uncore_fw_release_timer(unsigned long arg) | |||
240 | 1 << domain->id); | 244 | 1 << domain->id); |
241 | 245 | ||
242 | spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags); | 246 | spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags); |
247 | |||
248 | return HRTIMER_NORESTART; | ||
243 | } | 249 | } |
244 | 250 | ||
245 | void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) | 251 | void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) |
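The auto-release timer moves from a pinned timer_list to an hrtimer, and the wake_count reference is now taken when the timer is armed rather than in the put paths. A self-contained sketch of the same pattern (not driver code), showing init, arm and the one-shot callback together:

        #include <linux/hrtimer.h>
        #include <linux/ktime.h>
        #include <linux/kernel.h>

        struct fw_ref {
                struct hrtimer timer;
                unsigned int wake_count;
        };

        /* One-shot callback: drop the reference the arm path took. */
        static enum hrtimer_restart fw_ref_expired(struct hrtimer *timer)
        {
                struct fw_ref *ref = container_of(timer, struct fw_ref, timer);

                if (--ref->wake_count == 0)
                        ;       /* release the hardware forcewake here */

                return HRTIMER_NORESTART;
        }

        /* Arm a ~1ms relative timer; the pending timer owns one reference. */
        static void fw_ref_arm(struct fw_ref *ref)
        {
                ref->wake_count++;
                hrtimer_start_range_ns(&ref->timer, ktime_set(0, NSEC_PER_MSEC),
                                       NSEC_PER_MSEC, HRTIMER_MODE_REL);
        }

        static void fw_ref_init(struct fw_ref *ref)
        {
                hrtimer_init(&ref->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                ref->timer.function = fw_ref_expired;
                ref->wake_count = 0;
        }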
@@ -248,7 +254,6 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) | |||
248 | unsigned long irqflags; | 254 | unsigned long irqflags; |
249 | struct intel_uncore_forcewake_domain *domain; | 255 | struct intel_uncore_forcewake_domain *domain; |
250 | int retry_count = 100; | 256 | int retry_count = 100; |
251 | enum forcewake_domain_id id; | ||
252 | enum forcewake_domains fw = 0, active_domains; | 257 | enum forcewake_domains fw = 0, active_domains; |
253 | 258 | ||
254 | /* Hold uncore.lock across reset to prevent any register access | 259 | /* Hold uncore.lock across reset to prevent any register access |
@@ -258,18 +263,18 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) | |||
258 | while (1) { | 263 | while (1) { |
259 | active_domains = 0; | 264 | active_domains = 0; |
260 | 265 | ||
261 | for_each_fw_domain(domain, dev_priv, id) { | 266 | for_each_fw_domain(domain, dev_priv) { |
262 | if (del_timer_sync(&domain->timer) == 0) | 267 | if (hrtimer_cancel(&domain->timer) == 0) |
263 | continue; | 268 | continue; |
264 | 269 | ||
265 | intel_uncore_fw_release_timer((unsigned long)domain); | 270 | intel_uncore_fw_release_timer(&domain->timer); |
266 | } | 271 | } |
267 | 272 | ||
268 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 273 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
269 | 274 | ||
270 | for_each_fw_domain(domain, dev_priv, id) { | 275 | for_each_fw_domain(domain, dev_priv) { |
271 | if (timer_pending(&domain->timer)) | 276 | if (hrtimer_active(&domain->timer)) |
272 | active_domains |= (1 << id); | 277 | active_domains |= domain->mask; |
273 | } | 278 | } |
274 | 279 | ||
275 | if (active_domains == 0) | 280 | if (active_domains == 0) |
@@ -286,9 +291,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) | |||
286 | 291 | ||
287 | WARN_ON(active_domains); | 292 | WARN_ON(active_domains); |
288 | 293 | ||
289 | for_each_fw_domain(domain, dev_priv, id) | 294 | for_each_fw_domain(domain, dev_priv) |
290 | if (domain->wake_count) | 295 | if (domain->wake_count) |
291 | fw |= 1 << id; | 296 | fw |= domain->mask; |
292 | 297 | ||
293 | if (fw) | 298 | if (fw) |
294 | dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); | 299 | dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); |
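Each forcewake domain now carries its own bit in d->mask (set once in fw_domain_init() below), so these loops accumulate domain->mask instead of recomputing 1 << id, and the separate id iterator goes away. The masked iterator itself lives in i915_drv.h; as an assumption about its shape only, it walks the fixed domain array and skips entries outside the mask:

        /* Assumed shape of the iterator, for illustration only. */
        #define for_each_fw_domain_masked_sketch(d__, mask__, dev_priv__) \
                for ((d__) = &(dev_priv__)->uncore.fw_domain[0]; \
                     (d__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
                     (d__)++) \
                        if ((d__)->mask & (mask__))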
@@ -310,21 +315,49 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) | |||
310 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 315 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
311 | } | 316 | } |
312 | 317 | ||
313 | static void intel_uncore_ellc_detect(struct drm_device *dev) | 318 | static u64 gen9_edram_size(struct drm_i915_private *dev_priv) |
314 | { | 319 | { |
315 | struct drm_i915_private *dev_priv = dev->dev_private; | 320 | const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; |
321 | const unsigned int sets[4] = { 1, 1, 2, 2 }; | ||
322 | const u32 cap = dev_priv->edram_cap; | ||
323 | |||
324 | return EDRAM_NUM_BANKS(cap) * | ||
325 | ways[EDRAM_WAYS_IDX(cap)] * | ||
326 | sets[EDRAM_SETS_IDX(cap)] * | ||
327 | 1024 * 1024; | ||
328 | } | ||
329 | |||
330 | u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv) | ||
331 | { | ||
332 | if (!HAS_EDRAM(dev_priv)) | ||
333 | return 0; | ||
334 | |||
335 | /* The needed capability bits for size calculation | ||
336 | * are not there with pre gen9 so return 128MB always. | ||
337 | */ | ||
338 | if (INTEL_GEN(dev_priv) < 9) | ||
339 | return 128 * 1024 * 1024; | ||
340 | |||
341 | return gen9_edram_size(dev_priv); | ||
342 | } | ||
343 | |||
344 | static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv) | ||
345 | { | ||
346 | if (IS_HASWELL(dev_priv) || | ||
347 | IS_BROADWELL(dev_priv) || | ||
348 | INTEL_GEN(dev_priv) >= 9) { | ||
349 | dev_priv->edram_cap = __raw_i915_read32(dev_priv, | ||
350 | HSW_EDRAM_CAP); | ||
316 | 351 | ||
317 | if ((IS_HASWELL(dev) || IS_BROADWELL(dev) || | 352 | /* NB: We can't write IDICR yet because we do not have gt funcs |
318 | INTEL_INFO(dev)->gen >= 9) && | ||
319 | (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) { | ||
320 | /* The docs do not explain exactly how the calculation can be | ||
321 | * made. It is somewhat guessable, but for now, it's always | ||
322 | * 128MB. | ||
323 | * NB: We can't write IDICR yet because we do not have gt funcs | ||
324 | * set up */ | 353 | * set up */ |
325 | dev_priv->ellc_size = 128; | 354 | } else { |
326 | DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); | 355 | dev_priv->edram_cap = 0; |
327 | } | 356 | } |
357 | |||
358 | if (HAS_EDRAM(dev_priv)) | ||
359 | DRM_INFO("Found %lluMB of eDRAM\n", | ||
360 | intel_uncore_edram_size(dev_priv) / (1024 * 1024)); | ||
328 | } | 361 | } |
329 | 362 | ||
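On gen9+ the eDRAM size is decoded from the capability register as banks x ways x sets x 1 MiB, with the ways/sets indices mapped through the lookup tables above; pre-gen9 parts lack those fields, so the driver keeps reporting a flat 128 MB. A worked example with illustrative field values (not a specific SKU):

        static u64 edram_size_example(void)
        {
                const unsigned int banks = 2;   /* EDRAM_NUM_BANKS(cap)           */
                const unsigned int ways  = 16;  /* ways[EDRAM_WAYS_IDX(cap) == 3] */
                const unsigned int sets  = 2;   /* sets[EDRAM_SETS_IDX(cap) == 2] */

                return (u64)banks * ways * sets * 1024 * 1024;  /* 64 MiB */
        }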
330 | static bool | 363 | static bool |
@@ -410,16 +443,15 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, | |||
410 | enum forcewake_domains fw_domains) | 443 | enum forcewake_domains fw_domains) |
411 | { | 444 | { |
412 | struct intel_uncore_forcewake_domain *domain; | 445 | struct intel_uncore_forcewake_domain *domain; |
413 | enum forcewake_domain_id id; | ||
414 | 446 | ||
415 | if (!dev_priv->uncore.funcs.force_wake_get) | 447 | if (!dev_priv->uncore.funcs.force_wake_get) |
416 | return; | 448 | return; |
417 | 449 | ||
418 | fw_domains &= dev_priv->uncore.fw_domains; | 450 | fw_domains &= dev_priv->uncore.fw_domains; |
419 | 451 | ||
420 | for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { | 452 | for_each_fw_domain_masked(domain, fw_domains, dev_priv) { |
421 | if (domain->wake_count++) | 453 | if (domain->wake_count++) |
422 | fw_domains &= ~(1 << id); | 454 | fw_domains &= ~domain->mask; |
423 | } | 455 | } |
424 | 456 | ||
425 | if (fw_domains) | 457 | if (fw_domains) |
@@ -477,21 +509,19 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, | |||
477 | enum forcewake_domains fw_domains) | 509 | enum forcewake_domains fw_domains) |
478 | { | 510 | { |
479 | struct intel_uncore_forcewake_domain *domain; | 511 | struct intel_uncore_forcewake_domain *domain; |
480 | enum forcewake_domain_id id; | ||
481 | 512 | ||
482 | if (!dev_priv->uncore.funcs.force_wake_put) | 513 | if (!dev_priv->uncore.funcs.force_wake_put) |
483 | return; | 514 | return; |
484 | 515 | ||
485 | fw_domains &= dev_priv->uncore.fw_domains; | 516 | fw_domains &= dev_priv->uncore.fw_domains; |
486 | 517 | ||
487 | for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { | 518 | for_each_fw_domain_masked(domain, fw_domains, dev_priv) { |
488 | if (WARN_ON(domain->wake_count == 0)) | 519 | if (WARN_ON(domain->wake_count == 0)) |
489 | continue; | 520 | continue; |
490 | 521 | ||
491 | if (--domain->wake_count) | 522 | if (--domain->wake_count) |
492 | continue; | 523 | continue; |
493 | 524 | ||
494 | domain->wake_count++; | ||
495 | fw_domain_arm_timer(domain); | 525 | fw_domain_arm_timer(domain); |
496 | } | 526 | } |
497 | } | 527 | } |
@@ -539,18 +569,27 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, | |||
539 | void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) | 569 | void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) |
540 | { | 570 | { |
541 | struct intel_uncore_forcewake_domain *domain; | 571 | struct intel_uncore_forcewake_domain *domain; |
542 | enum forcewake_domain_id id; | ||
543 | 572 | ||
544 | if (!dev_priv->uncore.funcs.force_wake_get) | 573 | if (!dev_priv->uncore.funcs.force_wake_get) |
545 | return; | 574 | return; |
546 | 575 | ||
547 | for_each_fw_domain(domain, dev_priv, id) | 576 | for_each_fw_domain(domain, dev_priv) |
548 | WARN_ON(domain->wake_count); | 577 | WARN_ON(domain->wake_count); |
549 | } | 578 | } |
550 | 579 | ||
551 | /* We give fast paths for the really cool registers */ | 580 | /* We give fast paths for the really cool registers */ |
552 | #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) | 581 | #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) |
553 | 582 | ||
583 | #define __gen6_reg_read_fw_domains(offset) \ | ||
584 | ({ \ | ||
585 | enum forcewake_domains __fwd; \ | ||
586 | if (NEEDS_FORCE_WAKE(offset)) \ | ||
587 | __fwd = FORCEWAKE_RENDER; \ | ||
588 | else \ | ||
589 | __fwd = 0; \ | ||
590 | __fwd; \ | ||
591 | }) | ||
592 | |||
554 | #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) | 593 | #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) |
555 | 594 | ||
556 | #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ | 595 | #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ |
@@ -564,6 +603,48 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) | |||
564 | REG_RANGE((reg), 0x22000, 0x24000) || \ | 603 | REG_RANGE((reg), 0x22000, 0x24000) || \ |
565 | REG_RANGE((reg), 0x30000, 0x40000)) | 604 | REG_RANGE((reg), 0x30000, 0x40000)) |
566 | 605 | ||
606 | #define __vlv_reg_read_fw_domains(offset) \ | ||
607 | ({ \ | ||
608 | enum forcewake_domains __fwd = 0; \ | ||
609 | if (!NEEDS_FORCE_WAKE(offset)) \ | ||
610 | __fwd = 0; \ | ||
611 | else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \ | ||
612 | __fwd = FORCEWAKE_RENDER; \ | ||
613 | else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \ | ||
614 | __fwd = FORCEWAKE_MEDIA; \ | ||
615 | __fwd; \ | ||
616 | }) | ||
617 | |||
618 | static const i915_reg_t gen8_shadowed_regs[] = { | ||
619 | GEN6_RPNSWREQ, | ||
620 | GEN6_RC_VIDEO_FREQ, | ||
621 | RING_TAIL(RENDER_RING_BASE), | ||
622 | RING_TAIL(GEN6_BSD_RING_BASE), | ||
623 | RING_TAIL(VEBOX_RING_BASE), | ||
624 | RING_TAIL(BLT_RING_BASE), | ||
625 | /* TODO: Other registers are not yet used */ | ||
626 | }; | ||
627 | |||
628 | static bool is_gen8_shadowed(u32 offset) | ||
629 | { | ||
630 | int i; | ||
631 | for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) | ||
632 | if (offset == gen8_shadowed_regs[i].reg) | ||
633 | return true; | ||
634 | |||
635 | return false; | ||
636 | } | ||
637 | |||
638 | #define __gen8_reg_write_fw_domains(offset) \ | ||
639 | ({ \ | ||
640 | enum forcewake_domains __fwd; \ | ||
641 | if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ | ||
642 | __fwd = FORCEWAKE_RENDER; \ | ||
643 | else \ | ||
644 | __fwd = 0; \ | ||
645 | __fwd; \ | ||
646 | }) | ||
647 | |||
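The shadowed registers above (ring tails, RPS requests) can be written without waking the GT, hence "shadowed", so the gen8 write classifier returns no domain for them; any other offset below 0x40000 resolves to FORCEWAKE_RENDER. A small sketch using only names defined above:

        /* Sketch: RING_TAIL is listed in gen8_shadowed_regs, so the result
         * is 0 and the write is issued without taking forcewake. */
        static enum forcewake_domains classify_render_tail_write(void)
        {
                return __gen8_reg_write_fw_domains(
                        i915_mmio_reg_offset(RING_TAIL(RENDER_RING_BASE)));
        }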
567 | #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ | 648 | #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ |
568 | (REG_RANGE((reg), 0x2000, 0x4000) || \ | 649 | (REG_RANGE((reg), 0x2000, 0x4000) || \ |
569 | REG_RANGE((reg), 0x5200, 0x8000) || \ | 650 | REG_RANGE((reg), 0x5200, 0x8000) || \ |
@@ -586,6 +667,34 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) | |||
586 | REG_RANGE((reg), 0x9000, 0xB000) || \ | 667 | REG_RANGE((reg), 0x9000, 0xB000) || \ |
587 | REG_RANGE((reg), 0xF000, 0x10000)) | 668 | REG_RANGE((reg), 0xF000, 0x10000)) |
588 | 669 | ||
670 | #define __chv_reg_read_fw_domains(offset) \ | ||
671 | ({ \ | ||
672 | enum forcewake_domains __fwd = 0; \ | ||
673 | if (!NEEDS_FORCE_WAKE(offset)) \ | ||
674 | __fwd = 0; \ | ||
675 | else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ | ||
676 | __fwd = FORCEWAKE_RENDER; \ | ||
677 | else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ | ||
678 | __fwd = FORCEWAKE_MEDIA; \ | ||
679 | else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ | ||
680 | __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ | ||
681 | __fwd; \ | ||
682 | }) | ||
683 | |||
684 | #define __chv_reg_write_fw_domains(offset) \ | ||
685 | ({ \ | ||
686 | enum forcewake_domains __fwd = 0; \ | ||
687 | if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \ | ||
688 | __fwd = 0; \ | ||
689 | else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ | ||
690 | __fwd = FORCEWAKE_RENDER; \ | ||
691 | else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ | ||
692 | __fwd = FORCEWAKE_MEDIA; \ | ||
693 | else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ | ||
694 | __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ | ||
695 | __fwd; \ | ||
696 | }) | ||
697 | |||
589 | #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ | 698 | #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ |
590 | REG_RANGE((reg), 0xB00, 0x2000) | 699 | REG_RANGE((reg), 0xB00, 0x2000) |
591 | 700 | ||
@@ -618,6 +727,61 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) | |||
618 | !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \ | 727 | !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \ |
619 | !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) | 728 | !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) |
620 | 729 | ||
730 | #define SKL_NEEDS_FORCE_WAKE(reg) \ | ||
731 | ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) | ||
732 | |||
733 | #define __gen9_reg_read_fw_domains(offset) \ | ||
734 | ({ \ | ||
735 | enum forcewake_domains __fwd; \ | ||
736 | if (!SKL_NEEDS_FORCE_WAKE(offset)) \ | ||
737 | __fwd = 0; \ | ||
738 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ | ||
739 | __fwd = FORCEWAKE_RENDER; \ | ||
740 | else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ | ||
741 | __fwd = FORCEWAKE_MEDIA; \ | ||
742 | else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ | ||
743 | __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ | ||
744 | else \ | ||
745 | __fwd = FORCEWAKE_BLITTER; \ | ||
746 | __fwd; \ | ||
747 | }) | ||
748 | |||
749 | static const i915_reg_t gen9_shadowed_regs[] = { | ||
750 | RING_TAIL(RENDER_RING_BASE), | ||
751 | RING_TAIL(GEN6_BSD_RING_BASE), | ||
752 | RING_TAIL(VEBOX_RING_BASE), | ||
753 | RING_TAIL(BLT_RING_BASE), | ||
754 | GEN6_RPNSWREQ, | ||
755 | GEN6_RC_VIDEO_FREQ, | ||
756 | /* TODO: Other registers are not yet used */ | ||
757 | }; | ||
758 | |||
759 | static bool is_gen9_shadowed(u32 offset) | ||
760 | { | ||
761 | int i; | ||
762 | for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++) | ||
763 | if (offset == gen9_shadowed_regs[i].reg) | ||
764 | return true; | ||
765 | |||
766 | return false; | ||
767 | } | ||
768 | |||
769 | #define __gen9_reg_write_fw_domains(offset) \ | ||
770 | ({ \ | ||
771 | enum forcewake_domains __fwd; \ | ||
772 | if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \ | ||
773 | __fwd = 0; \ | ||
774 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ | ||
775 | __fwd = FORCEWAKE_RENDER; \ | ||
776 | else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ | ||
777 | __fwd = FORCEWAKE_MEDIA; \ | ||
778 | else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ | ||
779 | __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ | ||
780 | else \ | ||
781 | __fwd = FORCEWAKE_BLITTER; \ | ||
782 | __fwd; \ | ||
783 | }) | ||
784 | |||
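On gen9 both classifiers are purely range based: the render/media/common ranges map to their domains, the uncore range (0xB00..0x2000) needs no forcewake at all, and any other offset below 0x40000 falls back to the blitter domain. A short read-side illustration:

        /* Sketch: resolving a gen9 read with the macros above. An offset in
         * FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET() yields 0; an offset covered by
         * none of the render/media/common ranges yields FORCEWAKE_BLITTER. */
        static enum forcewake_domains gen9_read_classify_example(u32 offset)
        {
                return __gen9_reg_read_fw_domains(offset);
        }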
621 | static void | 785 | static void |
622 | ilk_dummy_write(struct drm_i915_private *dev_priv) | 786 | ilk_dummy_write(struct drm_i915_private *dev_priv) |
623 | { | 787 | { |
@@ -633,15 +797,6 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv, | |||
633 | const bool read, | 797 | const bool read, |
634 | const bool before) | 798 | const bool before) |
635 | { | 799 | { |
636 | /* XXX. We limit the auto arming traces for mmio | ||
637 | * debugs on these platforms. There are just too many | ||
638 | * revealed by these and CI/Bat suffers from the noise. | ||
639 | * Please fix and then re-enable the automatic traces. | ||
640 | */ | ||
641 | if (i915.mmio_debug < 2 && | ||
642 | (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) | ||
643 | return; | ||
644 | |||
645 | if (WARN(check_for_unclaimed_mmio(dev_priv), | 800 | if (WARN(check_for_unclaimed_mmio(dev_priv), |
646 | "Unclaimed register detected %s %s register 0x%x\n", | 801 | "Unclaimed register detected %s %s register 0x%x\n", |
647 | before ? "before" : "after", | 802 | before ? "before" : "after", |
@@ -720,19 +875,17 @@ static inline void __force_wake_auto(struct drm_i915_private *dev_priv, | |||
720 | enum forcewake_domains fw_domains) | 875 | enum forcewake_domains fw_domains) |
721 | { | 876 | { |
722 | struct intel_uncore_forcewake_domain *domain; | 877 | struct intel_uncore_forcewake_domain *domain; |
723 | enum forcewake_domain_id id; | ||
724 | 878 | ||
725 | if (WARN_ON(!fw_domains)) | 879 | if (WARN_ON(!fw_domains)) |
726 | return; | 880 | return; |
727 | 881 | ||
728 | /* Ideally GCC would be constant-fold and eliminate this loop */ | 882 | /* Ideally GCC would be constant-fold and eliminate this loop */ |
729 | for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { | 883 | for_each_fw_domain_masked(domain, fw_domains, dev_priv) { |
730 | if (domain->wake_count) { | 884 | if (domain->wake_count) { |
731 | fw_domains &= ~(1 << id); | 885 | fw_domains &= ~domain->mask; |
732 | continue; | 886 | continue; |
733 | } | 887 | } |
734 | 888 | ||
735 | domain->wake_count++; | ||
736 | fw_domain_arm_timer(domain); | 889 | fw_domain_arm_timer(domain); |
737 | } | 890 | } |
738 | 891 | ||
@@ -743,9 +896,11 @@ static inline void __force_wake_auto(struct drm_i915_private *dev_priv, | |||
743 | #define __gen6_read(x) \ | 896 | #define __gen6_read(x) \ |
744 | static u##x \ | 897 | static u##x \ |
745 | gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ | 898 | gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ |
899 | enum forcewake_domains fw_engine; \ | ||
746 | GEN6_READ_HEADER(x); \ | 900 | GEN6_READ_HEADER(x); \ |
747 | if (NEEDS_FORCE_WAKE(offset)) \ | 901 | fw_engine = __gen6_reg_read_fw_domains(offset); \ |
748 | __force_wake_auto(dev_priv, FORCEWAKE_RENDER); \ | 902 | if (fw_engine) \ |
903 | __force_wake_auto(dev_priv, fw_engine); \ | ||
749 | val = __raw_i915_read##x(dev_priv, reg); \ | 904 | val = __raw_i915_read##x(dev_priv, reg); \ |
750 | GEN6_READ_FOOTER; \ | 905 | GEN6_READ_FOOTER; \ |
751 | } | 906 | } |
@@ -753,14 +908,9 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ | |||
753 | #define __vlv_read(x) \ | 908 | #define __vlv_read(x) \ |
754 | static u##x \ | 909 | static u##x \ |
755 | vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ | 910 | vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ |
756 | enum forcewake_domains fw_engine = 0; \ | 911 | enum forcewake_domains fw_engine; \ |
757 | GEN6_READ_HEADER(x); \ | 912 | GEN6_READ_HEADER(x); \ |
758 | if (!NEEDS_FORCE_WAKE(offset)) \ | 913 | fw_engine = __vlv_reg_read_fw_domains(offset); \ |
759 | fw_engine = 0; \ | ||
760 | else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \ | ||
761 | fw_engine = FORCEWAKE_RENDER; \ | ||
762 | else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \ | ||
763 | fw_engine = FORCEWAKE_MEDIA; \ | ||
764 | if (fw_engine) \ | 914 | if (fw_engine) \ |
765 | __force_wake_auto(dev_priv, fw_engine); \ | 915 | __force_wake_auto(dev_priv, fw_engine); \ |
766 | val = __raw_i915_read##x(dev_priv, reg); \ | 916 | val = __raw_i915_read##x(dev_priv, reg); \ |
@@ -770,40 +920,21 @@ vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ | |||
770 | #define __chv_read(x) \ | 920 | #define __chv_read(x) \ |
771 | static u##x \ | 921 | static u##x \ |
772 | chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ | 922 | chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ |
773 | enum forcewake_domains fw_engine = 0; \ | 923 | enum forcewake_domains fw_engine; \ |
774 | GEN6_READ_HEADER(x); \ | 924 | GEN6_READ_HEADER(x); \ |
775 | if (!NEEDS_FORCE_WAKE(offset)) \ | 925 | fw_engine = __chv_reg_read_fw_domains(offset); \ |
776 | fw_engine = 0; \ | ||
777 | else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ | ||
778 | fw_engine = FORCEWAKE_RENDER; \ | ||
779 | else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ | ||
780 | fw_engine = FORCEWAKE_MEDIA; \ | ||
781 | else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ | ||
782 | fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ | ||
783 | if (fw_engine) \ | 926 | if (fw_engine) \ |
784 | __force_wake_auto(dev_priv, fw_engine); \ | 927 | __force_wake_auto(dev_priv, fw_engine); \ |
785 | val = __raw_i915_read##x(dev_priv, reg); \ | 928 | val = __raw_i915_read##x(dev_priv, reg); \ |
786 | GEN6_READ_FOOTER; \ | 929 | GEN6_READ_FOOTER; \ |
787 | } | 930 | } |
788 | 931 | ||
789 | #define SKL_NEEDS_FORCE_WAKE(reg) \ | ||
790 | ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) | ||
791 | |||
792 | #define __gen9_read(x) \ | 932 | #define __gen9_read(x) \ |
793 | static u##x \ | 933 | static u##x \ |
794 | gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ | 934 | gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ |
795 | enum forcewake_domains fw_engine; \ | 935 | enum forcewake_domains fw_engine; \ |
796 | GEN6_READ_HEADER(x); \ | 936 | GEN6_READ_HEADER(x); \ |
797 | if (!SKL_NEEDS_FORCE_WAKE(offset)) \ | 937 | fw_engine = __gen9_reg_read_fw_domains(offset); \ |
798 | fw_engine = 0; \ | ||
799 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ | ||
800 | fw_engine = FORCEWAKE_RENDER; \ | ||
801 | else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ | ||
802 | fw_engine = FORCEWAKE_MEDIA; \ | ||
803 | else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ | ||
804 | fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ | ||
805 | else \ | ||
806 | fw_engine = FORCEWAKE_BLITTER; \ | ||
807 | if (fw_engine) \ | 938 | if (fw_engine) \ |
808 | __force_wake_auto(dev_priv, fw_engine); \ | 939 | __force_wake_auto(dev_priv, fw_engine); \ |
809 | val = __raw_i915_read##x(dev_priv, reg); \ | 940 | val = __raw_i915_read##x(dev_priv, reg); \ |
@@ -942,34 +1073,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t | |||
942 | GEN6_WRITE_FOOTER; \ | 1073 | GEN6_WRITE_FOOTER; \ |
943 | } | 1074 | } |
944 | 1075 | ||
945 | static const i915_reg_t gen8_shadowed_regs[] = { | ||
946 | FORCEWAKE_MT, | ||
947 | GEN6_RPNSWREQ, | ||
948 | GEN6_RC_VIDEO_FREQ, | ||
949 | RING_TAIL(RENDER_RING_BASE), | ||
950 | RING_TAIL(GEN6_BSD_RING_BASE), | ||
951 | RING_TAIL(VEBOX_RING_BASE), | ||
952 | RING_TAIL(BLT_RING_BASE), | ||
953 | /* TODO: Other registers are not yet used */ | ||
954 | }; | ||
955 | |||
956 | static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, | ||
957 | i915_reg_t reg) | ||
958 | { | ||
959 | int i; | ||
960 | for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) | ||
961 | if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i])) | ||
962 | return true; | ||
963 | |||
964 | return false; | ||
965 | } | ||
966 | |||
967 | #define __gen8_write(x) \ | 1076 | #define __gen8_write(x) \ |
968 | static void \ | 1077 | static void \ |
969 | gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ | 1078 | gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ |
1079 | enum forcewake_domains fw_engine; \ | ||
970 | GEN6_WRITE_HEADER; \ | 1080 | GEN6_WRITE_HEADER; \ |
971 | if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \ | 1081 | fw_engine = __gen8_reg_write_fw_domains(offset); \ |
972 | __force_wake_auto(dev_priv, FORCEWAKE_RENDER); \ | 1082 | if (fw_engine) \ |
1083 | __force_wake_auto(dev_priv, fw_engine); \ | ||
973 | __raw_i915_write##x(dev_priv, reg, val); \ | 1084 | __raw_i915_write##x(dev_priv, reg, val); \ |
974 | GEN6_WRITE_FOOTER; \ | 1085 | GEN6_WRITE_FOOTER; \ |
975 | } | 1086 | } |
@@ -977,64 +1088,22 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool | |||
977 | #define __chv_write(x) \ | 1088 | #define __chv_write(x) \ |
978 | static void \ | 1089 | static void \ |
979 | chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ | 1090 | chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ |
980 | enum forcewake_domains fw_engine = 0; \ | 1091 | enum forcewake_domains fw_engine; \ |
981 | GEN6_WRITE_HEADER; \ | 1092 | GEN6_WRITE_HEADER; \ |
982 | if (!NEEDS_FORCE_WAKE(offset) || \ | 1093 | fw_engine = __chv_reg_write_fw_domains(offset); \ |
983 | is_gen8_shadowed(dev_priv, reg)) \ | ||
984 | fw_engine = 0; \ | ||
985 | else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ | ||
986 | fw_engine = FORCEWAKE_RENDER; \ | ||
987 | else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ | ||
988 | fw_engine = FORCEWAKE_MEDIA; \ | ||
989 | else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ | ||
990 | fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ | ||
991 | if (fw_engine) \ | 1094 | if (fw_engine) \ |
992 | __force_wake_auto(dev_priv, fw_engine); \ | 1095 | __force_wake_auto(dev_priv, fw_engine); \ |
993 | __raw_i915_write##x(dev_priv, reg, val); \ | 1096 | __raw_i915_write##x(dev_priv, reg, val); \ |
994 | GEN6_WRITE_FOOTER; \ | 1097 | GEN6_WRITE_FOOTER; \ |
995 | } | 1098 | } |
996 | 1099 | ||
997 | static const i915_reg_t gen9_shadowed_regs[] = { | ||
998 | RING_TAIL(RENDER_RING_BASE), | ||
999 | RING_TAIL(GEN6_BSD_RING_BASE), | ||
1000 | RING_TAIL(VEBOX_RING_BASE), | ||
1001 | RING_TAIL(BLT_RING_BASE), | ||
1002 | FORCEWAKE_BLITTER_GEN9, | ||
1003 | FORCEWAKE_RENDER_GEN9, | ||
1004 | FORCEWAKE_MEDIA_GEN9, | ||
1005 | GEN6_RPNSWREQ, | ||
1006 | GEN6_RC_VIDEO_FREQ, | ||
1007 | /* TODO: Other registers are not yet used */ | ||
1008 | }; | ||
1009 | |||
1010 | static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, | ||
1011 | i915_reg_t reg) | ||
1012 | { | ||
1013 | int i; | ||
1014 | for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++) | ||
1015 | if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i])) | ||
1016 | return true; | ||
1017 | |||
1018 | return false; | ||
1019 | } | ||
1020 | |||
1021 | #define __gen9_write(x) \ | 1100 | #define __gen9_write(x) \ |
1022 | static void \ | 1101 | static void \ |
1023 | gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \ | 1102 | gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \ |
1024 | bool trace) { \ | 1103 | bool trace) { \ |
1025 | enum forcewake_domains fw_engine; \ | 1104 | enum forcewake_domains fw_engine; \ |
1026 | GEN6_WRITE_HEADER; \ | 1105 | GEN6_WRITE_HEADER; \ |
1027 | if (!SKL_NEEDS_FORCE_WAKE(offset) || \ | 1106 | fw_engine = __gen9_reg_write_fw_domains(offset); \ |
1028 | is_gen9_shadowed(dev_priv, reg)) \ | ||
1029 | fw_engine = 0; \ | ||
1030 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ | ||
1031 | fw_engine = FORCEWAKE_RENDER; \ | ||
1032 | else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ | ||
1033 | fw_engine = FORCEWAKE_MEDIA; \ | ||
1034 | else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ | ||
1035 | fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ | ||
1036 | else \ | ||
1037 | fw_engine = FORCEWAKE_BLITTER; \ | ||
1038 | if (fw_engine) \ | 1107 | if (fw_engine) \ |
1039 | __force_wake_auto(dev_priv, fw_engine); \ | 1108 | __force_wake_auto(dev_priv, fw_engine); \ |
1040 | __raw_i915_write##x(dev_priv, reg, val); \ | 1109 | __raw_i915_write##x(dev_priv, reg, val); \ |
@@ -1150,7 +1219,14 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, | |||
1150 | d->i915 = dev_priv; | 1219 | d->i915 = dev_priv; |
1151 | d->id = domain_id; | 1220 | d->id = domain_id; |
1152 | 1221 | ||
1153 | setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d); | 1222 | BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); |
1223 | BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); | ||
1224 | BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); | ||
1225 | |||
1226 | d->mask = 1 << domain_id; | ||
1227 | |||
1228 | hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
1229 | d->timer.function = intel_uncore_fw_release_timer; | ||
1154 | 1230 | ||
1155 | dev_priv->uncore.fw_domains |= (1 << domain_id); | 1231 | dev_priv->uncore.fw_domains |= (1 << domain_id); |
1156 | 1232 | ||
@@ -1189,7 +1265,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) | |||
1189 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | 1265 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
1190 | dev_priv->uncore.funcs.force_wake_get = | 1266 | dev_priv->uncore.funcs.force_wake_get = |
1191 | fw_domains_get_with_thread_status; | 1267 | fw_domains_get_with_thread_status; |
1192 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; | 1268 | if (IS_HASWELL(dev)) |
1269 | dev_priv->uncore.funcs.force_wake_put = | ||
1270 | fw_domains_put_with_fifo; | ||
1271 | else | ||
1272 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; | ||
1193 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, | 1273 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1194 | FORCEWAKE_MT, FORCEWAKE_ACK_HSW); | 1274 | FORCEWAKE_MT, FORCEWAKE_ACK_HSW); |
1195 | } else if (IS_IVYBRIDGE(dev)) { | 1275 | } else if (IS_IVYBRIDGE(dev)) { |
@@ -1253,7 +1333,7 @@ void intel_uncore_init(struct drm_device *dev) | |||
1253 | 1333 | ||
1254 | i915_check_vgpu(dev); | 1334 | i915_check_vgpu(dev); |
1255 | 1335 | ||
1256 | intel_uncore_ellc_detect(dev); | 1336 | intel_uncore_edram_detect(dev_priv); |
1257 | intel_uncore_fw_domains_init(dev); | 1337 | intel_uncore_fw_domains_init(dev); |
1258 | __intel_uncore_early_sanitize(dev, false); | 1338 | __intel_uncore_early_sanitize(dev, false); |
1259 | 1339 | ||
@@ -1715,3 +1795,111 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) | |||
1715 | 1795 | ||
1716 | return false; | 1796 | return false; |
1717 | } | 1797 | } |
1798 | |||
1799 | static enum forcewake_domains | ||
1800 | intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, | ||
1801 | i915_reg_t reg) | ||
1802 | { | ||
1803 | enum forcewake_domains fw_domains; | ||
1804 | |||
1805 | if (intel_vgpu_active(dev_priv->dev)) | ||
1806 | return 0; | ||
1807 | |||
1808 | switch (INTEL_INFO(dev_priv)->gen) { | ||
1809 | case 9: | ||
1810 | fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); | ||
1811 | break; | ||
1812 | case 8: | ||
1813 | if (IS_CHERRYVIEW(dev_priv)) | ||
1814 | fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg)); | ||
1815 | else | ||
1816 | fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg)); | ||
1817 | break; | ||
1818 | case 7: | ||
1819 | case 6: | ||
1820 | if (IS_VALLEYVIEW(dev_priv)) | ||
1821 | fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg)); | ||
1822 | else | ||
1823 | fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg)); | ||
1824 | break; | ||
1825 | default: | ||
1826 | MISSING_CASE(INTEL_INFO(dev_priv)->gen); | ||
1827 | case 5: /* forcewake was introduced with gen6 */ | ||
1828 | case 4: | ||
1829 | case 3: | ||
1830 | case 2: | ||
1831 | return 0; | ||
1832 | } | ||
1833 | |||
1834 | WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); | ||
1835 | |||
1836 | return fw_domains; | ||
1837 | } | ||
1838 | |||
1839 | static enum forcewake_domains | ||
1840 | intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, | ||
1841 | i915_reg_t reg) | ||
1842 | { | ||
1843 | enum forcewake_domains fw_domains; | ||
1844 | |||
1845 | if (intel_vgpu_active(dev_priv->dev)) | ||
1846 | return 0; | ||
1847 | |||
1848 | switch (INTEL_INFO(dev_priv)->gen) { | ||
1849 | case 9: | ||
1850 | fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); | ||
1851 | break; | ||
1852 | case 8: | ||
1853 | if (IS_CHERRYVIEW(dev_priv)) | ||
1854 | fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg)); | ||
1855 | else | ||
1856 | fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg)); | ||
1857 | break; | ||
1858 | case 7: | ||
1859 | case 6: | ||
1860 | fw_domains = FORCEWAKE_RENDER; | ||
1861 | break; | ||
1862 | default: | ||
1863 | MISSING_CASE(INTEL_INFO(dev_priv)->gen); | ||
1864 | case 5: | ||
1865 | case 4: | ||
1866 | case 3: | ||
1867 | case 2: | ||
1868 | return 0; | ||
1869 | } | ||
1870 | |||
1871 | WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); | ||
1872 | |||
1873 | return fw_domains; | ||
1874 | } | ||
1875 | |||
1876 | /** | ||
1877 | * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access | ||
1878 | * a register | ||
1879 | * @dev_priv: pointer to struct drm_i915_private | ||
1880 | * @reg: register in question | ||
1881 | * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE | ||
1882 | * | ||
1883 | * Returns a set of forcewake domains required to be taken with for example | ||
1884 | * intel_uncore_forcewake_get for the specified register to be accessible in the | ||
1885 | * specified mode (read, write or read/write) with raw mmio accessors. | ||
1886 | * | ||
1887 | * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the | ||
1888 | * callers to do FIFO management on their own or risk losing writes. | ||
1889 | */ | ||
1890 | enum forcewake_domains | ||
1891 | intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, | ||
1892 | i915_reg_t reg, unsigned int op) | ||
1893 | { | ||
1894 | enum forcewake_domains fw_domains = 0; | ||
1895 | |||
1896 | WARN_ON(!op); | ||
1897 | |||
1898 | if (op & FW_REG_READ) | ||
1899 | fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); | ||
1900 | |||
1901 | if (op & FW_REG_WRITE) | ||
1902 | fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); | ||
1903 | |||
1904 | return fw_domains; | ||
1905 | } | ||
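intel_uncore_forcewake_for_reg() lets callers that use the raw mmio accessors take exactly the forcewake domains a register needs, once, up front. A usage sketch under the uncore lock; the register argument is a placeholder, the raw read uses the __raw_i915_read32() helper seen earlier in this file, and the __locked get/put variants are assumed from the put__locked hunk header above:

        /* Usage sketch: grab the precomputed domains, then do raw accesses. */
        static u32 read_with_explicit_forcewake(struct drm_i915_private *dev_priv,
                                                i915_reg_t reg)
        {
                enum forcewake_domains fw;
                u32 val;

                fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);

                spin_lock_irq(&dev_priv->uncore.lock);
                intel_uncore_forcewake_get__locked(dev_priv, fw);
                val = __raw_i915_read32(dev_priv, reg);
                intel_uncore_forcewake_put__locked(dev_priv, fw);
                spin_unlock_irq(&dev_priv->uncore.lock);

                return val;
        }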