author	Dave Airlie <airlied@redhat.com>	2016-06-08 22:11:29 -0400
committer	Dave Airlie <airlied@redhat.com>	2016-06-08 22:11:29 -0400
commit	5b735940aa11113abd369e8b3a144c68b0ff5ffa (patch)
tree	9c237cd5136165efe6efd026cca7adab17dd5d08
parent	2cca45574007b4a77fa5f63ea45d664510cec22a (diff)
parent	1750d59dfa3caf1fc5354a2217f0e83d717c9b22 (diff)
Merge tag 'drm-intel-next-2016-06-06' of git://anongit.freedesktop.org/drm-intel into drm-next
- some polish for the guc code (Dave Gordon)
- big refactoring of gen9 display clock handling code (Ville)
- refactoring work in the context code (Chris Wilson)
- give encoder/crtc/planes useful names for debug output (Ville)
- improvements to skl/kbl wm computation code (Mahesh Kumar)
- bunch of smaller improvements all over as usual

* tag 'drm-intel-next-2016-06-06' of git://anongit.freedesktop.org/drm-intel: (64 commits)
  drm/i915: Update DRIVER_DATE to 20160606
  drm/i915: Extract physical display dimensions from VBT
  drm/i915: Silence "unexpected child device config size" for VBT on 845g
  drm/i915/skl+: Use scaling amount for plane data rate calculation (v4)
  drm/i915/skl+: calculate plane pixel rate (v4)
  drm/i915/skl+: calculate ddb minimum allocation (v6)
  drm/i915: Don't try to calculate relative data rates during hw readout
  drm/i915: Only ignore eDP ports that are connected
  drm/i915: Update GEN6_PMINTRMSK setup with GuC enabled
  drm/i915: kill STANDARD/CURSOR plane screams
  drm/i915: Give encoders useful names
  drm/i915: Give meaningful names to all the planes
  drm/i915: Don't leak primary/cursor planes on crtc init failure
  drm/i915: Set crtc->name to "pipe A", "pipe B", etc.
  drm/i915: Use plane->name in debug prints
  drm/i915: Use crtc->name in debug messages
  drm/i915: Reject modeset if the dotclock is too high
  drm/i915: Fix NULL pointer deference when out of PLLs in IVB
  drm/i915/ilk: Don't disable SSC source if it's in use
  drm/i915/bxt: Sanitize CDCLK to fix breakage during S4 resume
  ...
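[Editor's note] The context refactoring pulled in here renames struct intel_context to struct i915_gem_context and folds the old legacy_hw_ctx/execlists split into one per-engine array, so per-engine state is reached the same way on both submission paths. A minimal sketch of the new access pattern, lifted from the debugfs and context-switch hunks below (ctx and engine stand for any context/engine pair already held under struct_mutex):

	/* Per-engine state now lives in ctx->engine[]; the old
	 * ctx->legacy_hw_ctx.rcs_state becomes ctx->engine[RCS].state. */
	struct intel_context *ce = &ctx->engine[engine->id];

	if (ce->state)		/* context image, if one was allocated */
		describe_obj(m, ce->state);
	if (ce->ringbuf)	/* ring buffer, if one was allocated */
		describe_ctx_ringbuf(m, ce->ringbuf);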
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c106
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c12
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c18
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h104
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c209
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c12
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c167
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c30
-rw-r--r--drivers/gpu/drm/i915/i915_params.c14
-rw-r--r--drivers/gpu/drm/i915/i915_params.h3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h12
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c9
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c863
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c41
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c38
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h9
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c18
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c4
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h37
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c153
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c145
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h9
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c105
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c176
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h4
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c56
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c16
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h7
41 files changed, 1467 insertions, 957 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0a4bedb96d65..e4f2c55d9697 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
 
-static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
-{
-	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
-	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
-	seq_putc(m, ' ');
-}
-
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
@@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m,
 	print_file_stats(m, "[k]batch pool", stats);
 }
 
+static int per_file_ctx_stats(int id, void *ptr, void *data)
+{
+	struct i915_gem_context *ctx = ptr;
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
+		if (ctx->engine[n].state)
+			per_file_stats(0, ctx->engine[n].state, data);
+		if (ctx->engine[n].ringbuf)
+			per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+	}
+
+	return 0;
+}
+
+static void print_context_stats(struct seq_file *m,
+				struct drm_i915_private *dev_priv)
+{
+	struct file_stats stats;
+	struct drm_file *file;
+
+	memset(&stats, 0, sizeof(stats));
+
+	mutex_lock(&dev_priv->dev->struct_mutex);
+	if (dev_priv->kernel_context)
+		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+
+	list_for_each_entry(file, &dev_priv->dev->filelist, lhead) {
+		struct drm_i915_file_private *fpriv = file->driver_priv;
+		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+	}
+	mutex_unlock(&dev_priv->dev->struct_mutex);
+
+	print_file_stats(m, "[k]contexts", stats);
+}
+
 #define count_vmas(list, member) do { \
 	list_for_each_entry(vma, list, member) { \
 		size += i915_gem_obj_total_ggtt_size(vma->obj); \
@@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
-
 	mutex_unlock(&dev->struct_mutex);
 
 	mutex_lock(&dev->filelist_mutex);
+	print_context_stats(m, dev_priv);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct file_stats stats;
 		struct task_struct *task;
@@ -1279,6 +1308,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	}
 	seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
 		   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+	seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
 	seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 	seq_printf(m, "Render p-state ratio: %d\n",
 		   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1989,8 +2019,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
-	struct intel_context *ctx;
-	enum intel_engine_id id;
+	struct i915_gem_context *ctx;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1998,32 +2027,36 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		if (!i915.enable_execlists &&
-		    ctx->legacy_hw_ctx.rcs_state == NULL)
-			continue;
-
 		seq_printf(m, "HW context %u ", ctx->hw_id);
-		describe_ctx(m, ctx);
-		if (ctx == dev_priv->kernel_context)
-			seq_printf(m, "(kernel context) ");
+		if (IS_ERR(ctx->file_priv)) {
+			seq_puts(m, "(deleted) ");
+		} else if (ctx->file_priv) {
+			struct pid *pid = ctx->file_priv->file->pid;
+			struct task_struct *task;
 
-		if (i915.enable_execlists) {
-			seq_putc(m, '\n');
-			for_each_engine_id(engine, dev_priv, id) {
-				struct drm_i915_gem_object *ctx_obj =
-					ctx->engine[id].state;
-				struct intel_ringbuffer *ringbuf =
-					ctx->engine[id].ringbuf;
-
-				seq_printf(m, "%s: ", engine->name);
-				if (ctx_obj)
-					describe_obj(m, ctx_obj);
-				if (ringbuf)
-					describe_ctx_ringbuf(m, ringbuf);
-				seq_putc(m, '\n');
+			task = get_pid_task(pid, PIDTYPE_PID);
+			if (task) {
+				seq_printf(m, "(%s [%d]) ",
+					   task->comm, task->pid);
+				put_task_struct(task);
 			}
 		} else {
-			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+			seq_puts(m, "(kernel) ");
+		}
+
+		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+		seq_putc(m, '\n');
+
+		for_each_engine(engine, dev_priv) {
+			struct intel_context *ce = &ctx->engine[engine->id];
+
+			seq_printf(m, "%s: ", engine->name);
+			seq_putc(m, ce->initialised ? 'I' : 'i');
+			if (ce->state)
+				describe_obj(m, ce->state);
+			if (ce->ringbuf)
+				describe_ctx_ringbuf(m, ce->ringbuf);
+			seq_putc(m, '\n');
 		}
 
 		seq_putc(m, '\n');
@@ -2035,13 +2068,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
 }
 
 static void i915_dump_lrc_obj(struct seq_file *m,
-			      struct intel_context *ctx,
+			      struct i915_gem_context *ctx,
 			      struct intel_engine_cs *engine)
 {
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	unsigned long ggtt_offset = 0;
 
 	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
@@ -2083,7 +2116,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;
 
 	if (!i915.enable_execlists) {
@@ -2263,7 +2296,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 
 static int per_file_ctx(int id, void *ptr, void *data)
 {
-	struct intel_context *ctx = ptr;
+	struct i915_gem_context *ctx = ptr;
 	struct seq_file *m = data;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 
@@ -2504,6 +2537,7 @@ static void i915_guc_client_info(struct seq_file *m,
 	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
 		   client->wq_size, client->wq_offset, client->wq_tail);
 
+	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
 	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
 	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
 	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fd06bff216ff..07edaed9d5a2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -507,7 +507,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
 
-	intel_guc_ucode_init(dev);
+	intel_guc_init(dev);
 
 	ret = i915_gem_init(dev);
 	if (ret)
@@ -544,7 +544,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 cleanup_gem:
 	i915_gem_fini(dev);
 cleanup_irq:
-	intel_guc_ucode_fini(dev);
+	intel_guc_fini(dev);
 	drm_irq_uninstall(dev);
 	intel_teardown_gmbus(dev);
 cleanup_csr:
@@ -1307,7 +1307,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	intel_uncore_sanitize(dev_priv);
 
-	intel_opregion_setup(dev);
+	intel_opregion_setup(dev_priv);
 
 	i915_gem_load_init_fences(dev_priv);
 
@@ -1376,7 +1376,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 
 	if (INTEL_INFO(dev_priv)->num_pipes) {
 		/* Must be done after probing outputs */
-		intel_opregion_init(dev);
+		intel_opregion_register(dev_priv);
 		acpi_video_register();
 	}
 
@@ -1395,7 +1395,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	i915_audio_component_cleanup(dev_priv);
 	intel_gpu_ips_teardown();
 	acpi_video_unregister();
-	intel_opregion_fini(dev_priv->dev);
+	intel_opregion_unregister(dev_priv);
 	i915_teardown_sysfs(dev_priv->dev);
 	i915_gem_shrinker_cleanup(dev_priv);
 }
@@ -1527,7 +1527,7 @@ int i915_driver_unload(struct drm_device *dev)
 	/* Flush any outstanding unpin_work. */
 	flush_workqueue(dev_priv->wq);
 
-	intel_guc_ucode_fini(dev);
+	intel_guc_fini(dev);
 	i915_gem_fini(dev);
 	intel_fbc_cleanup_cfb(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 61bf5a92040d..872c60608dbd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -626,10 +626,10 @@ static int i915_drm_suspend(struct drm_device *dev)
 	i915_save_state(dev);
 
 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
-	intel_opregion_notify_adapter(dev, opregion_target_state);
+	intel_opregion_notify_adapter(dev_priv, opregion_target_state);
 
 	intel_uncore_forcewake_reset(dev_priv, false);
-	intel_opregion_fini(dev);
+	intel_opregion_unregister(dev_priv);
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
@@ -747,7 +747,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	mutex_unlock(&dev->struct_mutex);
 
 	i915_restore_state(dev);
-	intel_opregion_setup(dev);
+	intel_opregion_setup(dev_priv);
 
 	intel_init_pch_refclk(dev);
 	drm_mode_config_reset(dev);
@@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	/* Config may have changed between suspend and resume */
 	drm_helper_hpd_irq_event(dev);
 
-	intel_opregion_init(dev);
+	intel_opregion_register(dev_priv);
 
 	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
@@ -800,7 +800,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	dev_priv->modeset_restore = MODESET_DONE;
 	mutex_unlock(&dev_priv->modeset_restore_lock);
 
-	intel_opregion_notify_adapter(dev, PCI_D0);
+	intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
 	drm_kms_helper_poll_enable(dev);
 
@@ -1588,14 +1588,14 @@ static int intel_runtime_suspend(struct device *device)
 	 * FIXME: We really should find a document that references the arguments
 	 * used below!
 	 */
-	if (IS_BROADWELL(dev)) {
+	if (IS_BROADWELL(dev_priv)) {
 		/*
 		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
 		 * being detected, and the call we do at intel_runtime_resume()
 		 * won't be able to restore them. Since PCI_D3hot matches the
 		 * actual specification and appears to be working, use it.
 		 */
-		intel_opregion_notify_adapter(dev, PCI_D3hot);
+		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
 	} else {
 		/*
 		 * current versions of firmware which depend on this opregion
@@ -1604,7 +1604,7 @@ static int intel_runtime_suspend(struct device *device)
 	 * to distinguish it from notifications that might be sent via
 	 * the suspend path.
 	 */
-		intel_opregion_notify_adapter(dev, PCI_D1);
+		intel_opregion_notify_adapter(dev_priv, PCI_D1);
 	}
 
 	assert_forcewakes_inactive(dev_priv);
@@ -1628,7 +1628,7 @@ static int intel_runtime_resume(struct device *device)
 	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	intel_opregion_notify_adapter(dev, PCI_D0);
+	intel_opregion_notify_adapter(dev_priv, PCI_D0);
 	dev_priv->pm.suspended = false;
 	if (intel_uncore_unclaimed_mmio(dev_priv))
 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b4ea941d87f3..0113207967d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -66,7 +66,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160522"
+#define DRIVER_DATE		"20160606"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -834,9 +834,8 @@ struct i915_ctx_hang_stats {
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
 
-#define CONTEXT_NO_ZEROMAP (1<<0)
 /**
- * struct intel_context - as the name implies, represents a context.
+ * struct i915_gem_context - as the name implies, represents a context.
  * @ref: reference count.
  * @user_handle: userspace tracking identity for this context.
  * @remap_slice: l3 row remapping information.
@@ -854,37 +853,33 @@ struct i915_ctx_hang_stats {
  * Contexts are memory images used by the hardware to store copies of their
  * internal state.
  */
-struct intel_context {
+struct i915_gem_context {
 	struct kref ref;
-	int user_handle;
-	uint8_t remap_slice;
 	struct drm_i915_private *i915;
-	int flags;
 	struct drm_i915_file_private *file_priv;
-	struct i915_ctx_hang_stats hang_stats;
 	struct i915_hw_ppgtt *ppgtt;
 
+	struct i915_ctx_hang_stats hang_stats;
+
 	/* Unique identifier for this context, used by the hw for tracking */
+	unsigned long flags;
 	unsigned hw_id;
+	u32 user_handle;
+#define CONTEXT_NO_ZEROMAP (1<<0)
 
-	/* Legacy ring buffer submission */
-	struct {
-		struct drm_i915_gem_object *rcs_state;
-		bool initialized;
-	} legacy_hw_ctx;
-
-	/* Execlists */
-	struct {
+	struct intel_context {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
-		int pin_count;
 		struct i915_vma *lrc_vma;
-		u64 lrc_desc;
 		uint32_t *lrc_reg_state;
+		u64 lrc_desc;
+		int pin_count;
 		bool initialised;
 	} engine[I915_NUM_ENGINES];
 
 	struct list_head link;
+
+	u8 remap_slice;
 };
 
 enum fb_op_origin {
@@ -1132,6 +1127,8 @@ struct intel_gen6_power_mgmt {
 	bool interrupts_enabled;
 	u32 pm_iir;
 
+	u32 pm_intr_keep;
+
 	/* Frequencies are stored in potentially platform dependent multiples.
 	 * In other words, *_freq needs to be multiplied by X to be interesting.
 	 * Soft limits are those which are used for the dynamic reclocking done
@@ -1715,7 +1712,7 @@ struct i915_execbuffer_params {
 	uint64_t batch_obj_vm_offset;
 	struct intel_engine_cs *engine;
 	struct drm_i915_gem_object *batch_obj;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	struct drm_i915_gem_request *request;
 };
 
@@ -1765,6 +1762,7 @@ struct drm_i915_private {
 	wait_queue_head_t gmbus_wait_queue;
 
 	struct pci_dev *bridge_dev;
+	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs engine[I915_NUM_ENGINES];
 	struct drm_i915_gem_object *semaphore_obj;
 	uint32_t last_seqno, next_seqno;
@@ -1820,13 +1818,17 @@ struct drm_i915_private {
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
-	unsigned int skl_boot_cdclk;
+	unsigned int skl_preferred_vco_freq;
 	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
 	unsigned int max_dotclk_freq;
 	unsigned int rawclk_freq;
 	unsigned int hpll_freq;
 	unsigned int czclk_freq;
 
+	struct {
+		unsigned int vco, ref;
+	} cdclk_pll;
+
 	/**
 	 * wq - Driver workqueue for GEM.
 	 *
@@ -2018,8 +2020,6 @@ struct drm_i915_private {
 		void (*stop_engine)(struct intel_engine_cs *engine);
 	} gt;
 
-	struct intel_context *kernel_context;
-
 	/* perform PHY state sanity checks? */
 	bool chv_phy_assert[2];
 
@@ -2386,7 +2386,7 @@ struct drm_i915_gem_request {
 	 * i915_gem_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	struct intel_ringbuffer *ringbuf;
 
 	/**
@@ -2398,7 +2398,7 @@ struct drm_i915_gem_request {
 	 * we keep the previous context pinned until the following (this)
 	 * request is retired.
 	 */
-	struct intel_context *previous_context;
+	struct i915_gem_context *previous_context;
 
 	/** Batch buffer related to this request if any (used for
 	    error state dump only) */
@@ -2442,7 +2442,7 @@
 
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct intel_context *ctx);
+		       struct i915_gem_context *ctx);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 				   struct drm_file *file);
@@ -2807,8 +2807,14 @@ struct drm_i915_cmd_table {
 
 #define HAS_CSR(dev)	(IS_GEN9(dev))
 
-#define HAS_GUC_UCODE(dev)	(IS_GEN9(dev) && !IS_KABYLAKE(dev))
-#define HAS_GUC_SCHED(dev)	(IS_GEN9(dev) && !IS_KABYLAKE(dev))
+/*
+ * For now, anything with a GuC requires uCode loading, and then supports
+ * command submission once loaded. But these are logically independent
+ * properties, so we have separate macros to test them.
+ */
+#define HAS_GUC(dev)		(IS_GEN9(dev) && !IS_KABYLAKE(dev))
+#define HAS_GUC_UCODE(dev)	(HAS_GUC(dev))
+#define HAS_GUC_SCHED(dev)	(HAS_GUC(dev))
 
 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
 				    INTEL_INFO(dev)->gen >= 8)
@@ -3422,22 +3428,36 @@ void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-static inline void i915_gem_context_reference(struct intel_context *ctx)
+
+static inline struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+	struct i915_gem_context *ctx;
+
+	lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex);
+
+	ctx = idr_find(&file_priv->context_idr, id);
+	if (!ctx)
+		return ERR_PTR(-ENOENT);
+
+	return ctx;
+}
+
+static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
 {
 	kref_get(&ctx->ref);
 }
 
-static inline void i915_gem_context_unreference(struct intel_context *ctx)
+static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
 {
+	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
 	kref_put(&ctx->ref, i915_gem_context_free);
 }
 
-static inline bool i915_gem_context_is_default(const struct intel_context *c)
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
 {
 	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
 }
@@ -3607,19 +3627,19 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_device *dev);
-extern void intel_opregion_init(struct drm_device *dev);
-extern void intel_opregion_fini(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
+extern void intel_opregion_register(struct drm_i915_private *dev_priv);
+extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
 extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 					 bool enable);
-extern int intel_opregion_notify_adapter(struct drm_device *dev,
+extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
 					  pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_device *dev);
+extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
 #else
-static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_device *dev) { return; }
-static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
+static inline void intel_opregion_init(struct drm_i915_private *dev) { }
+static inline void intel_opregion_fini(struct drm_i915_private *dev) { }
 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
 }
@@ -3629,11 +3649,11 @@ intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
 	return 0;
 }
 static inline int
-intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
 {
 	return 0;
 }
-static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
 {
 	return -ENODEV;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12407bc70c71..343d88114f3b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2692,7 +2692,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
-				   const struct intel_context *ctx)
+				   const struct i915_gem_context *ctx)
 {
 	unsigned long elapsed;
 
@@ -2717,7 +2717,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
 }
 
 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
-				  struct intel_context *ctx,
+				  struct i915_gem_context *ctx,
 				  const bool guilty)
 {
 	struct i915_ctx_hang_stats *hs;
@@ -2745,7 +2745,7 @@ void i915_gem_request_free(struct kref *req_ref)
 
 static inline int
 __i915_gem_request_alloc(struct intel_engine_cs *engine,
-			 struct intel_context *ctx,
+			 struct i915_gem_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -2821,7 +2821,7 @@ err:
  */
 struct drm_i915_gem_request *
 i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct intel_context *ctx)
+		       struct i915_gem_context *ctx)
 {
 	struct drm_i915_gem_request *req;
 	int err;
@@ -4886,13 +4886,10 @@ i915_gem_init_hw(struct drm_device *dev)
 	intel_mocs_init_l3cc_table(dev);
 
 	/* We can't enable contexts until all firmware is loaded */
-	if (HAS_GUC_UCODE(dev)) {
-		ret = intel_guc_ucode_load(dev);
-		if (ret) {
-			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
-			ret = -EIO;
+	if (HAS_GUC(dev)) {
+		ret = intel_guc_setup(dev);
+		if (ret)
 			goto out;
-		}
 	}
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 2aedd188473d..a3b11aac23a4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -134,7 +134,7 @@ static int get_context_size(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-static void i915_gem_context_clean(struct intel_context *ctx)
+static void i915_gem_context_clean(struct i915_gem_context *ctx)
 {
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	struct i915_vma *vma, *next;
@@ -151,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+	int i;
 
+	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
 	trace_i915_context_free(ctx);
 
-	if (i915.enable_execlists)
-		intel_lr_context_free(ctx);
-
 	/*
 	 * This context is going away and we need to remove all VMAs still
 	 * around. This is to handle imported shared objects for which
@@ -167,8 +166,19 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
 	i915_ppgtt_put(ctx->ppgtt);
 
-	if (ctx->legacy_hw_ctx.rcs_state)
-		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_context *ce = &ctx->engine[i];
+
+		if (!ce->state)
+			continue;
+
+		WARN_ON(ce->pin_count);
+		if (ce->ringbuf)
+			intel_ringbuffer_free(ce->ringbuf);
+
+		drm_gem_object_unreference(&ce->state->base);
+	}
+
 	list_del(&ctx->link);
 
 	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -181,6 +191,8 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	struct drm_i915_gem_object *obj;
 	int ret;
 
+	lockdep_assert_held(&dev->struct_mutex);
+
 	obj = i915_gem_object_create(dev, size);
 	if (IS_ERR(obj))
 		return obj;
@@ -234,12 +246,12 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
 	return 0;
 }
 
-static struct intel_context *
+static struct i915_gem_context *
 __create_hw_context(struct drm_device *dev,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -263,7 +275,7 @@ __create_hw_context(struct drm_device *dev,
 			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-		ctx->legacy_hw_ctx.rcs_state = obj;
+		ctx->engine[RCS].state = obj;
 	}
 
 	/* Default context will never have a file_priv */
@@ -296,44 +308,27 @@ err_out:
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static struct intel_context *
+static struct i915_gem_context *
 i915_gem_create_context(struct drm_device *dev,
 			struct drm_i915_file_private *file_priv)
 {
-	const bool is_global_default_ctx = file_priv == NULL;
-	struct intel_context *ctx;
-	int ret = 0;
+	struct i915_gem_context *ctx;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	lockdep_assert_held(&dev->struct_mutex);
 
 	ctx = __create_hw_context(dev, file_priv);
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
-		/* We may need to do things with the shrinker which
-		 * require us to immediately switch back to the default
-		 * context. This can cause a problem as pinning the
-		 * default context also requires GTT space which may not
-		 * be available. To avoid this we always pin the default
-		 * context.
-		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(to_i915(dev)), 0);
-		if (ret) {
-			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
-			goto err_destroy;
-		}
-	}
-
 	if (USES_FULL_PPGTT(dev)) {
 		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 
-		if (IS_ERR_OR_NULL(ppgtt)) {
+		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
-			ret = PTR_ERR(ppgtt);
-			goto err_unpin;
+			idr_remove(&file_priv->context_idr, ctx->user_handle);
+			i915_gem_context_unreference(ctx);
+			return ERR_CAST(ppgtt);
 		}
 
 		ctx->ppgtt = ppgtt;
@@ -342,24 +337,19 @@ i915_gem_create_context(struct drm_device *dev,
 	trace_i915_context_create(ctx);
 
 	return ctx;
-
-err_unpin:
-	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
-		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
-err_destroy:
-	idr_remove(&file_priv->context_idr, ctx->user_handle);
-	i915_gem_context_unreference(ctx);
-	return ERR_PTR(ret);
 }
 
-static void i915_gem_context_unpin(struct intel_context *ctx,
+static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 				   struct intel_engine_cs *engine)
 {
 	if (i915.enable_execlists) {
 		intel_lr_context_unpin(ctx, engine);
 	} else {
-		if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
-			i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+		struct intel_context *ce = &ctx->engine[engine->id];
+
+		if (ce->state)
+			i915_gem_object_ggtt_unpin(ce->state);
+
 		i915_gem_context_unreference(ctx);
 	}
 }
@@ -368,8 +358,10 @@ void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	lockdep_assert_held(&dev->struct_mutex);
+
 	if (i915.enable_execlists) {
-		struct intel_context *ctx;
+		struct i915_gem_context *ctx;
 
 		list_for_each_entry(ctx, &dev_priv->context_list, link)
 			intel_lr_context_reset(dev_priv, ctx);
@@ -381,7 +373,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
@@ -421,6 +413,26 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}
 
+	if (!i915.enable_execlists && ctx->engine[RCS].state) {
+		int ret;
+
+		/* We may need to do things with the shrinker which
+		 * require us to immediately switch back to the default
+		 * context. This can cause a problem as pinning the
+		 * default context also requires GTT space which may not
+		 * be available. To avoid this we always pin the default
+		 * context.
+		 */
+		ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
+					    get_context_alignment(dev_priv), 0);
+		if (ret) {
+			DRM_ERROR("Failed to pinned default global context (error %d)\n",
+				  ret);
+			i915_gem_context_unreference(ctx);
+			return ret;
+		}
+	}
+
 	dev_priv->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -433,26 +445,32 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 
+	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+
 	for_each_engine(engine, dev_priv) {
-		if (engine->last_context == NULL)
-			continue;
+		if (engine->last_context) {
+			i915_gem_context_unpin(engine->last_context, engine);
+			engine->last_context = NULL;
+		}
 
-		i915_gem_context_unpin(engine->last_context, engine);
-		engine->last_context = NULL;
+		/* Force the GPU state to be reinitialised on enabling */
+		dev_priv->kernel_context->engine[engine->id].initialised =
+			engine->init_context == NULL;
 	}
 
 	/* Force the GPU state to be reinitialised on enabling */
-	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
 	dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
 }
 
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *dctx = dev_priv->kernel_context;
+	struct i915_gem_context *dctx = dev_priv->kernel_context;
+
+	lockdep_assert_held(&dev->struct_mutex);
 
-	if (dctx->legacy_hw_ctx.rcs_state)
-		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
+	if (!i915.enable_execlists && dctx->engine[RCS].state)
+		i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
 
 	i915_gem_context_unreference(dctx);
 	dev_priv->kernel_context = NULL;
@@ -462,8 +480,9 @@ void i915_gem_context_fini(struct drm_device *dev)
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
-	struct intel_context *ctx = p;
+	struct i915_gem_context *ctx = p;
 
+	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_unreference(ctx);
 	return 0;
 }
@@ -471,7 +490,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 
 	idr_init(&file_priv->context_idr);
 
@@ -491,22 +510,12 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
+	lockdep_assert_held(&dev->struct_mutex);
+
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
 }
 
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
-{
-	struct intel_context *ctx;
-
-	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
-	if (!ctx)
-		return ERR_PTR(-ENOENT);
-
-	return ctx;
-}
-
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
@@ -569,7 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	intel_ring_emit(engine, MI_NOOP);
 	intel_ring_emit(engine, MI_SET_CONTEXT);
 	intel_ring_emit(engine,
-			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -641,12 +650,12 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice)
 
 static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
 				   struct intel_engine_cs *engine,
-				   struct intel_context *to)
+				   struct i915_gem_context *to)
 {
 	if (to->remap_slice)
 		return false;
 
-	if (!to->legacy_hw_ctx.initialized)
+	if (!to->engine[RCS].initialised)
 		return false;
 
 	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
@@ -658,7 +667,7 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
 static bool
 needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
 		  struct intel_engine_cs *engine,
-		  struct intel_context *to)
+		  struct i915_gem_context *to)
 {
 	if (!ppgtt)
 		return false;
@@ -683,7 +692,7 @@ needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
 
 static bool
 needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
-		   struct intel_context *to,
+		   struct i915_gem_context *to,
 		   u32 hw_flags)
 {
 	if (!ppgtt)
@@ -700,10 +709,10 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
 
 static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
-	struct intel_context *to = req->ctx;
+	struct i915_gem_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
 	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
-	struct intel_context *from;
+	struct i915_gem_context *from;
 	u32 hw_flags;
 	int ret, i;
 
@@ -711,7 +720,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
 				    get_context_alignment(engine->i915),
 				    0);
 	if (ret)
@@ -734,7 +743,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
 	if (ret)
 		goto unpin_out;
 
@@ -749,7 +758,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		goto unpin_out;
 	}
 
-	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
 		/* NB: If we inhibit the restore, the context is not allowed to
 		 * die because future work may end up depending on valid address
 		 * space. This means we must enforce that a page table load
@@ -773,8 +782,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -782,10 +791,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
		 */
-		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		from->engine[RCS].state->dirty = 1;
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
 		i915_gem_context_unreference(from);
 	}
 	i915_gem_context_reference(to);
@@ -820,19 +829,19 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 			to->remap_slice &= ~(1<<i);
 	}
 
-	if (!to->legacy_hw_ctx.initialized) {
+	if (!to->engine[RCS].initialised) {
 		if (engine->init_context) {
 			ret = engine->init_context(req);
 			if (ret)
 				return ret;
 		}
-		to->legacy_hw_ctx.initialized = true;
+		to->engine[RCS].initialised = true;
 	}
 
 	return 0;
 
 unpin_out:
-	i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
 	return ret;
 }
 
@@ -852,14 +861,12 @@ unpin_out:
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = req->i915;
 
 	WARN_ON(i915.enable_execlists);
-	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	lockdep_assert_held(&req->i915->dev->struct_mutex);
 
-	if (engine->id != RCS ||
-	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
-		struct intel_context *to = req->ctx;
+	if (!req->ctx->engine[engine->id].state) {
+		struct i915_gem_context *to = req->ctx;
 		struct i915_hw_ppgtt *ppgtt =
 			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
 
@@ -897,7 +904,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;
 
 	if (!contexts_enabled(dev))
@@ -926,7 +933,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_context_destroy *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;
 
 	if (args->pad != 0)
@@ -939,13 +946,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		return PTR_ERR(ctx);
 	}
 
-	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
+	idr_remove(&file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -958,14 +965,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_gem_context_param *args = data;
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		return PTR_ERR(ctx);
@@ -1001,14 +1008,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
1001{ 1008{
1002 struct drm_i915_file_private *file_priv = file->driver_priv; 1009 struct drm_i915_file_private *file_priv = file->driver_priv;
1003 struct drm_i915_gem_context_param *args = data; 1010 struct drm_i915_gem_context_param *args = data;
1004 struct intel_context *ctx; 1011 struct i915_gem_context *ctx;
1005 int ret; 1012 int ret;
1006 1013
1007 ret = i915_mutex_lock_interruptible(dev); 1014 ret = i915_mutex_lock_interruptible(dev);
1008 if (ret) 1015 if (ret)
1009 return ret; 1016 return ret;
1010 1017
1011 ctx = i915_gem_context_get(file_priv, args->ctx_id); 1018 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
1012 if (IS_ERR(ctx)) { 1019 if (IS_ERR(ctx)) {
1013 mutex_unlock(&dev->struct_mutex); 1020 mutex_unlock(&dev->struct_mutex);
1014 return PTR_ERR(ctx); 1021 return PTR_ERR(ctx);
@@ -1047,7 +1054,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
1047 struct drm_i915_private *dev_priv = dev->dev_private; 1054 struct drm_i915_private *dev_priv = dev->dev_private;
1048 struct drm_i915_reset_stats *args = data; 1055 struct drm_i915_reset_stats *args = data;
1049 struct i915_ctx_hang_stats *hs; 1056 struct i915_ctx_hang_stats *hs;
1050 struct intel_context *ctx; 1057 struct i915_gem_context *ctx;
1051 int ret; 1058 int ret;
1052 1059
1053 if (args->flags || args->pad) 1060 if (args->flags || args->pad)
@@ -1060,7 +1067,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
1060 if (ret) 1067 if (ret)
1061 return ret; 1068 return ret;
1062 1069
1063 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); 1070 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
1064 if (IS_ERR(ctx)) { 1071 if (IS_ERR(ctx)) {
1065 mutex_unlock(&dev->struct_mutex); 1072 mutex_unlock(&dev->struct_mutex);
1066 return PTR_ERR(ctx); 1073 return PTR_ERR(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f315e78f38ed..8097698b9622 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma)
714static int 714static int
715i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, 715i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
716 struct list_head *vmas, 716 struct list_head *vmas,
717 struct intel_context *ctx, 717 struct i915_gem_context *ctx,
718 bool *need_relocs) 718 bool *need_relocs)
719{ 719{
720 struct drm_i915_gem_object *obj; 720 struct drm_i915_gem_object *obj;
@@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
826 struct intel_engine_cs *engine, 826 struct intel_engine_cs *engine,
827 struct eb_vmas *eb, 827 struct eb_vmas *eb,
828 struct drm_i915_gem_exec_object2 *exec, 828 struct drm_i915_gem_exec_object2 *exec,
829 struct intel_context *ctx) 829 struct i915_gem_context *ctx)
830{ 830{
831 struct drm_i915_gem_relocation_entry *reloc; 831 struct drm_i915_gem_relocation_entry *reloc;
832 struct i915_address_space *vm; 832 struct i915_address_space *vm;
@@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev,
1063 return 0; 1063 return 0;
1064} 1064}
1065 1065
1066static struct intel_context * 1066static struct i915_gem_context *
1067i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, 1067i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1068 struct intel_engine_cs *engine, const u32 ctx_id) 1068 struct intel_engine_cs *engine, const u32 ctx_id)
1069{ 1069{
1070 struct intel_context *ctx = NULL; 1070 struct i915_gem_context *ctx = NULL;
1071 struct i915_ctx_hang_stats *hs; 1071 struct i915_ctx_hang_stats *hs;
1072 1072
1073 if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) 1073 if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1074 return ERR_PTR(-EINVAL); 1074 return ERR_PTR(-EINVAL);
1075 1075
1076 ctx = i915_gem_context_get(file->driver_priv, ctx_id); 1076 ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1077 if (IS_ERR(ctx)) 1077 if (IS_ERR(ctx))
1078 return ctx; 1078 return ctx;
1079 1079
@@ -1428,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1428 struct drm_i915_gem_object *batch_obj; 1428 struct drm_i915_gem_object *batch_obj;
1429 struct drm_i915_gem_exec_object2 shadow_exec_entry; 1429 struct drm_i915_gem_exec_object2 shadow_exec_entry;
1430 struct intel_engine_cs *engine; 1430 struct intel_engine_cs *engine;
1431 struct intel_context *ctx; 1431 struct i915_gem_context *ctx;
1432 struct i915_address_space *vm; 1432 struct i915_address_space *vm;
1433 struct i915_execbuffer_params params_master; /* XXX: will be removed later */ 1433 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1434 struct i915_execbuffer_params *params = &params_master; 1434 struct i915_execbuffer_params *params = &params_master;
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 169242a8adff..ac72451c571c 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -360,10 +360,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
360 struct drm_i915_gem_object *client_obj = client->client_obj; 360 struct drm_i915_gem_object *client_obj = client->client_obj;
361 struct drm_i915_private *dev_priv = guc_to_i915(guc); 361 struct drm_i915_private *dev_priv = guc_to_i915(guc);
362 struct intel_engine_cs *engine; 362 struct intel_engine_cs *engine;
363 struct intel_context *ctx = client->owner; 363 struct i915_gem_context *ctx = client->owner;
364 struct guc_context_desc desc; 364 struct guc_context_desc desc;
365 struct sg_table *sg; 365 struct sg_table *sg;
366 enum intel_engine_id id;
367 u32 gfx_addr; 366 u32 gfx_addr;
368 367
369 memset(&desc, 0, sizeof(desc)); 368 memset(&desc, 0, sizeof(desc));
@@ -373,10 +372,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
373 desc.priority = client->priority; 372 desc.priority = client->priority;
374 desc.db_id = client->doorbell_id; 373 desc.db_id = client->doorbell_id;
375 374
376 for_each_engine_id(engine, dev_priv, id) { 375 for_each_engine(engine, dev_priv) {
376 struct intel_context *ce = &ctx->engine[engine->id];
377 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; 377 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
378 struct drm_i915_gem_object *obj; 378 struct drm_i915_gem_object *obj;
379 uint64_t ctx_desc;
380 379
381 /* TODO: We have a design issue to be solved here. Only when we 380 /* TODO: We have a design issue to be solved here. Only when we
382 * receive the first batch, we know which engine is used by the 381 * receive the first batch, we know which engine is used by the
@@ -385,20 +384,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
385 * for now who owns a GuC client. But for future owner of GuC 384 * for now who owns a GuC client. But for future owner of GuC
386 * client, need to make sure lrc is pinned prior to enter here. 385 * client, need to make sure lrc is pinned prior to enter here.
387 */ 386 */
388 obj = ctx->engine[id].state; 387 if (!ce->state)
389 if (!obj)
390 break; /* XXX: continue? */ 388 break; /* XXX: continue? */
391 389
392 ctx_desc = intel_lr_context_descriptor(ctx, engine); 390 lrc->context_desc = lower_32_bits(ce->lrc_desc);
393 lrc->context_desc = (u32)ctx_desc;
394 391
395 /* The state page is after PPHWSP */ 392 /* The state page is after PPHWSP */
396 gfx_addr = i915_gem_obj_ggtt_offset(obj); 393 gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
397 lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; 394 lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
398 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | 395 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
399 (engine->guc_id << GUC_ELC_ENGINE_OFFSET); 396 (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
400 397
401 obj = ctx->engine[id].ringbuf->obj; 398 obj = ce->ringbuf->obj;
402 gfx_addr = i915_gem_obj_ggtt_offset(obj); 399 gfx_addr = i915_gem_obj_ggtt_offset(obj);
403 400
404 lrc->ring_begin = gfx_addr; 401 lrc->ring_begin = gfx_addr;
@@ -426,7 +423,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
426 desc.wq_size = client->wq_size; 423 desc.wq_size = client->wq_size;
427 424
428 /* 425 /*
429 * XXX: Take LRCs from an existing intel_context if this is not an 426 * XXX: Take LRCs from an existing context if this is not an
430 * IsKMDCreatedContext client 427 * IsKMDCreatedContext client
431 */ 428 */
432 desc.desc_private = (uintptr_t)client; 429 desc.desc_private = (uintptr_t)client;
@@ -450,47 +447,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
450 sizeof(desc) * client->ctx_index); 447 sizeof(desc) * client->ctx_index);
451} 448}
452 449
453int i915_guc_wq_check_space(struct i915_guc_client *gc) 450/**
451 * i915_guc_wq_check_space() - check that the GuC can accept a request
452 * @request: request associated with the commands
453 *
454 * Return: 0 if space is available
455 * -EAGAIN if space is not currently available
456 *
457 * This function must be called (and must return 0) before a request
458 * is submitted to the GuC via i915_guc_submit() below. Once a result
459 * of 0 has been returned, it remains valid until (but only until)
460 * the next call to submit().
461 *
462 * This precheck allows the caller to determine in advance that space
463 * will be available for the next submission before committing resources
464 * to it, and helps avoid late failures with complicated recovery paths.
465 */
466int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
454{ 467{
468 const size_t wqi_size = sizeof(struct guc_wq_item);
469 struct i915_guc_client *gc = request->i915->guc.execbuf_client;
455 struct guc_process_desc *desc; 470 struct guc_process_desc *desc;
456 u32 size = sizeof(struct guc_wq_item); 471 u32 freespace;
457 int ret = -ETIMEDOUT, timeout_counter = 200;
458 472
459 if (!gc) 473 GEM_BUG_ON(gc == NULL);
460 return 0;
461 474
462 desc = gc->client_base + gc->proc_desc_offset; 475 desc = gc->client_base + gc->proc_desc_offset;
463 476
464 while (timeout_counter-- > 0) { 477 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
465 if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { 478 if (likely(freespace >= wqi_size))
466 ret = 0; 479 return 0;
467 break;
468 }
469 480
470 if (timeout_counter) 481 gc->no_wq_space += 1;
471 usleep_range(1000, 2000);
472 };
473 482
474 return ret; 483 return -EAGAIN;
475} 484}
476 485
477static int guc_add_workqueue_item(struct i915_guc_client *gc, 486static void guc_add_workqueue_item(struct i915_guc_client *gc,
478 struct drm_i915_gem_request *rq) 487 struct drm_i915_gem_request *rq)
479{ 488{
489 /* wqi_len is in DWords, and does not include the one-word header */
490 const size_t wqi_size = sizeof(struct guc_wq_item);
491 const u32 wqi_len = wqi_size/sizeof(u32) - 1;
480 struct guc_process_desc *desc; 492 struct guc_process_desc *desc;
481 struct guc_wq_item *wqi; 493 struct guc_wq_item *wqi;
482 void *base; 494 void *base;
483 u32 tail, wq_len, wq_off, space; 495 u32 freespace, tail, wq_off, wq_page;
484 496
485 desc = gc->client_base + gc->proc_desc_offset; 497 desc = gc->client_base + gc->proc_desc_offset;
486 space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
487 if (WARN_ON(space < sizeof(struct guc_wq_item)))
488 return -ENOSPC; /* shouldn't happen */
489 498
490 /* postincrement WQ tail for next time */ 499 /* Free space is guaranteed, see i915_guc_wq_check_space() above */
491 wq_off = gc->wq_tail; 500 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
492 gc->wq_tail += sizeof(struct guc_wq_item); 501 GEM_BUG_ON(freespace < wqi_size);
493 gc->wq_tail &= gc->wq_size - 1; 502
503 /* The GuC firmware wants the tail index in QWords, not bytes */
504 tail = rq->tail;
505 GEM_BUG_ON(tail & 7);
506 tail >>= 3;
507 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
494 508
495 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we 509 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
496 * should not have the case where structure wqi is across page, neither 510 * should not have the case where structure wqi is across page, neither
@@ -499,19 +513,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 499 * XXX: if not the case, we need to save data to a temp wqi and copy it to 513 * XXX: if not the case, we need to save data to a temp wqi and copy it to
500 * workqueue buffer dw by dw. 514 * workqueue buffer dw by dw.
501 */ 515 */
502 WARN_ON(sizeof(struct guc_wq_item) != 16); 516 BUILD_BUG_ON(wqi_size != 16);
503 WARN_ON(wq_off & 3); 517
518 /* postincrement WQ tail for next time */
519 wq_off = gc->wq_tail;
520 gc->wq_tail += wqi_size;
521 gc->wq_tail &= gc->wq_size - 1;
522 GEM_BUG_ON(wq_off & (wqi_size - 1));
504 523
505 /* wq starts from the page after doorbell / process_desc */ 524 /* WQ starts from the page after doorbell / process_desc */
506 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 525 wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
507 (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
508 wq_off &= PAGE_SIZE - 1; 526 wq_off &= PAGE_SIZE - 1;
527 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
509 wqi = (struct guc_wq_item *)((char *)base + wq_off); 528 wqi = (struct guc_wq_item *)((char *)base + wq_off);
510 529
511 /* len does not include the header */ 530 /* Now fill in the 4-word work queue item */
512 wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
513 wqi->header = WQ_TYPE_INORDER | 531 wqi->header = WQ_TYPE_INORDER |
514 (wq_len << WQ_LEN_SHIFT) | 532 (wqi_len << WQ_LEN_SHIFT) |
515 (rq->engine->guc_id << WQ_TARGET_SHIFT) | 533 (rq->engine->guc_id << WQ_TARGET_SHIFT) |
516 WQ_NO_WCFLUSH_WAIT; 534 WQ_NO_WCFLUSH_WAIT;
517 535
@@ -519,48 +537,50 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
519 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, 537 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
520 rq->engine); 538 rq->engine);
521 539
522 /* The GuC firmware wants the tail index in QWords, not bytes */
523 tail = rq->ringbuf->tail >> 3;
524 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; 540 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
525 wqi->fence_id = 0; /*XXX: what fence to be here */ 541 wqi->fence_id = rq->seqno;
526 542
527 kunmap_atomic(base); 543 kunmap_atomic(base);
528
529 return 0;
530} 544}
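Two bits of arithmetic above are worth a worked example: CIRC_SPACE() is the stock <linux/circ_buf.h> helper for free space in a power-of-two ring, and the tail handed to the GuC is a qword index rather than a byte offset. All values below are illustrative:

	#include <linux/circ_buf.h>

	u32 wq_size = 8192;	/* two-page workqueue buffer */
	u32 wq_tail = 4096;	/* producer index (the driver) */
	u32 head = 64;		/* consumer index (GuC firmware) */

	/* (64 - (4096 + 1)) & 8191 == 4159 bytes free, far more than the
	 * 16-byte guc_wq_item, so i915_guc_wq_check_space() would return 0.
	 */
	u32 freespace = CIRC_SPACE(wq_tail, head, wq_size);

	/* A ring tail at byte 256 becomes qword index 32; anything not
	 * 8-byte aligned trips the GEM_BUG_ON(tail & 7) above.
	 */
	u32 tail = 256 >> 3;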
531 545
532/** 546/**
533 * i915_guc_submit() - Submit commands through GuC 547 * i915_guc_submit() - Submit commands through GuC
534 * @client: the guc client where commands will go through
535 * @rq: request associated with the commands 548 * @rq: request associated with the commands
536 * 549 *
537 * Return: 0 if succeed 550 * Return: 0 on success, otherwise an errno.
551 * (Note: nonzero really shouldn't happen!)
552 *
553 * The caller must have already called i915_guc_wq_check_space() above
554 * with a result of 0 (success) since the last request submission. This
555 * guarantees that there is space in the work queue for the new request,
556 * so enqueuing the item cannot fail.
557 *
 558 * Bad Things Will Happen if the caller violates this protocol, e.g. calls
559 * submit() when check() says there's no space, or calls submit() multiple
560 * times with no intervening check().
561 *
562 * The only error here arises if the doorbell hardware isn't functioning
 562 * as expected, which really shouldn't happen.
538 */ 564 */
539int i915_guc_submit(struct i915_guc_client *client, 565int i915_guc_submit(struct drm_i915_gem_request *rq)
540 struct drm_i915_gem_request *rq)
541{ 566{
542 struct intel_guc *guc = client->guc;
543 unsigned int engine_id = rq->engine->guc_id; 567 unsigned int engine_id = rq->engine->guc_id;
544 int q_ret, b_ret; 568 struct intel_guc *guc = &rq->i915->guc;
569 struct i915_guc_client *client = guc->execbuf_client;
570 int b_ret;
545 571
546 q_ret = guc_add_workqueue_item(client, rq); 572 guc_add_workqueue_item(client, rq);
547 if (q_ret == 0) 573 b_ret = guc_ring_doorbell(client);
548 b_ret = guc_ring_doorbell(client);
549 574
550 client->submissions[engine_id] += 1; 575 client->submissions[engine_id] += 1;
551 if (q_ret) { 576 client->retcode = b_ret;
552 client->q_fail += 1; 577 if (b_ret)
553 client->retcode = q_ret;
554 } else if (b_ret) {
555 client->b_fail += 1; 578 client->b_fail += 1;
556 client->retcode = q_ret = b_ret; 579
557 } else {
558 client->retcode = 0;
559 }
560 guc->submissions[engine_id] += 1; 580 guc->submissions[engine_id] += 1;
561 guc->last_seqno[engine_id] = rq->seqno; 581 guc->last_seqno[engine_id] = rq->seqno;
562 582
563 return q_ret; 583 return b_ret;
564} 584}
565 585
566/* 586/*
@@ -677,7 +697,7 @@ static void guc_client_free(struct drm_device *dev,
677 */ 697 */
678static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, 698static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
679 uint32_t priority, 699 uint32_t priority,
680 struct intel_context *ctx) 700 struct i915_gem_context *ctx)
681{ 701{
682 struct i915_guc_client *client; 702 struct i915_guc_client *client;
683 struct drm_i915_private *dev_priv = dev->dev_private; 703 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -915,11 +935,12 @@ int i915_guc_submission_enable(struct drm_device *dev)
915{ 935{
916 struct drm_i915_private *dev_priv = dev->dev_private; 936 struct drm_i915_private *dev_priv = dev->dev_private;
917 struct intel_guc *guc = &dev_priv->guc; 937 struct intel_guc *guc = &dev_priv->guc;
918 struct intel_context *ctx = dev_priv->kernel_context;
919 struct i915_guc_client *client; 938 struct i915_guc_client *client;
920 939
921 /* client for execbuf submission */ 940 /* client for execbuf submission */
922 client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx); 941 client = guc_client_alloc(dev,
942 GUC_CTX_PRIORITY_KMD_NORMAL,
943 dev_priv->kernel_context);
923 if (!client) { 944 if (!client) {
924 DRM_ERROR("Failed to create execbuf guc_client\n"); 945 DRM_ERROR("Failed to create execbuf guc_client\n");
925 return -ENOMEM; 946 return -ENOMEM;
@@ -966,10 +987,10 @@ int intel_guc_suspend(struct drm_device *dev)
966{ 987{
967 struct drm_i915_private *dev_priv = dev->dev_private; 988 struct drm_i915_private *dev_priv = dev->dev_private;
968 struct intel_guc *guc = &dev_priv->guc; 989 struct intel_guc *guc = &dev_priv->guc;
969 struct intel_context *ctx; 990 struct i915_gem_context *ctx;
970 u32 data[3]; 991 u32 data[3];
971 992
972 if (!i915.enable_guc_submission) 993 if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
973 return 0; 994 return 0;
974 995
975 ctx = dev_priv->kernel_context; 996 ctx = dev_priv->kernel_context;
@@ -992,10 +1013,10 @@ int intel_guc_resume(struct drm_device *dev)
992{ 1013{
993 struct drm_i915_private *dev_priv = dev->dev_private; 1014 struct drm_i915_private *dev_priv = dev->dev_private;
994 struct intel_guc *guc = &dev_priv->guc; 1015 struct intel_guc *guc = &dev_priv->guc;
995 struct intel_context *ctx; 1016 struct i915_gem_context *ctx;
996 u32 data[3]; 1017 u32 data[3];
997 1018
998 if (!i915.enable_guc_submission) 1019 if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
999 return 0; 1020 return 0;
1000 1021
1001 ctx = dev_priv->kernel_context; 1022 ctx = dev_priv->kernel_context;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3242a37fb304..5c7378374ae6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -364,19 +364,7 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
364 364
365u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 365u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
366{ 366{
367 /* 367 return (mask & ~dev_priv->rps.pm_intr_keep);
368 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
369 * if GEN6_PM_UP_EI_EXPIRED is masked.
370 *
371 * TODO: verify if this can be reproduced on VLV,CHV.
372 */
373 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
374 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
375
376 if (INTEL_INFO(dev_priv)->gen >= 8)
377 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
378
379 return mask;
380} 368}
381 369
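With the workaround bits gathered once into rps.pm_intr_keep at init time (see the intel_irq_init() hunk below), sanitizing a mask reduces to the single AND above. A hedged usage sketch:

	/* Bits in pm_intr_keep are stripped from any caller-supplied mask,
	 * so e.g. on gen8 GEN8_PMINTR_REDIRECT_TO_NON_DISP can never be
	 * masked, even when RPS asks for everything to be masked:
	 */
	u32 mask = gen6_sanitize_rps_pm_mask(dev_priv, ~0u);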
382void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 370void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
@@ -3797,6 +3785,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3797 uint32_t de_pipe_enables; 3785 uint32_t de_pipe_enables;
3798 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3786 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3799 u32 de_port_enables; 3787 u32 de_port_enables;
3788 u32 de_misc_masked = GEN8_DE_MISC_GSE;
3800 enum pipe pipe; 3789 enum pipe pipe;
3801 3790
3802 if (INTEL_INFO(dev_priv)->gen >= 9) { 3791 if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3832,6 +3821,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3832 de_pipe_enables); 3821 de_pipe_enables);
3833 3822
3834 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3823 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3824 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3835} 3825}
3836 3826
3837static int gen8_irq_postinstall(struct drm_device *dev) 3827static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4576,6 +4566,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4576 else 4566 else
4577 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4567 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4578 4568
4569 dev_priv->rps.pm_intr_keep = 0;
4570
4571 /*
4572 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
4573 * if GEN6_PM_UP_EI_EXPIRED is masked.
4574 *
4575 * TODO: verify if this can be reproduced on VLV,CHV.
4576 */
4577 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4578 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4579
4580 if (INTEL_INFO(dev_priv)->gen >= 8)
4581 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
4582
4579 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, 4583 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4580 i915_hangcheck_elapsed); 4584 i915_hangcheck_elapsed);
4581 4585
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 383c076919ed..5e18cf9f754d 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -54,7 +54,8 @@ struct i915_params i915 __read_mostly = {
54 .verbose_state_checks = 1, 54 .verbose_state_checks = 1,
55 .nuclear_pageflip = 0, 55 .nuclear_pageflip = 0,
56 .edp_vswing = 0, 56 .edp_vswing = 0,
57 .enable_guc_submission = false, 57 .enable_guc_loading = 0,
58 .enable_guc_submission = 0,
58 .guc_log_level = -1, 59 .guc_log_level = -1,
59 .enable_dp_mst = true, 60 .enable_dp_mst = true,
60 .inject_load_failure = 0, 61 .inject_load_failure = 0,
@@ -198,8 +199,15 @@ MODULE_PARM_DESC(edp_vswing,
198 "(0=use value from vbt [default], 1=low power swing(200mV)," 199 "(0=use value from vbt [default], 1=low power swing(200mV),"
199 "2=default swing(400mV))"); 200 "2=default swing(400mV))");
200 201
201module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400); 202module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
202MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)"); 203MODULE_PARM_DESC(enable_guc_loading,
204 "Enable GuC firmware loading "
205 "(-1=auto, 0=never [default], 1=if available, 2=required)");
206
207module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
208MODULE_PARM_DESC(enable_guc_submission,
209 "Enable GuC submission "
210 "(-1=auto, 0=never [default], 1=if available, 2=required)");
203 211
204module_param_named(guc_log_level, i915.guc_log_level, int, 0400); 212module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
205MODULE_PARM_DESC(guc_log_level, 213MODULE_PARM_DESC(guc_log_level,
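With the switch from bool to int, both GuC options accept the same four-way value. A usage sketch on the kernel command line, with meanings taken from the parameter descriptions above:

	i915.enable_guc_loading=1 i915.enable_guc_submission=1

requests firmware loading and GuC submission when available; 2/2 makes both required, and -1/-1 leaves the choice to the driver.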
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 65e73dd7d970..1323261a0cdd 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -45,6 +45,8 @@ struct i915_params {
45 int enable_ips; 45 int enable_ips;
46 int invert_brightness; 46 int invert_brightness;
47 int enable_cmd_parser; 47 int enable_cmd_parser;
48 int enable_guc_loading;
49 int enable_guc_submission;
48 int guc_log_level; 50 int guc_log_level;
49 int use_mmio_flip; 51 int use_mmio_flip;
50 int mmio_debug; 52 int mmio_debug;
@@ -57,7 +59,6 @@ struct i915_params {
57 bool load_detect_test; 59 bool load_detect_test;
58 bool reset; 60 bool reset;
59 bool disable_display; 61 bool disable_display;
60 bool enable_guc_submission;
61 bool verbose_state_checks; 62 bool verbose_state_checks;
62 bool nuclear_pageflip; 63 bool nuclear_pageflip;
63 bool enable_dp_mst; 64 bool enable_dp_mst;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 482c10913ad6..dfb4c7a88de3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7031,7 +7031,7 @@ enum skl_disp_power_wells {
7031#define VLV_RCEDATA _MMIO(0xA0BC) 7031#define VLV_RCEDATA _MMIO(0xA0BC)
7032#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) 7032#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
7033#define GEN6_PMINTRMSK _MMIO(0xA168) 7033#define GEN6_PMINTRMSK _MMIO(0xA168)
7034#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) 7034#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
7035#define VLV_PWRDWNUPCTL _MMIO(0xA294) 7035#define VLV_PWRDWNUPCTL _MMIO(0xA294)
7036#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) 7036#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
7037#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) 7037#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 37b6444b8e22..02507bfc8def 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -203,7 +203,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
203 struct drm_minor *dminor = dev_to_drm_minor(dev); 203 struct drm_minor *dminor = dev_to_drm_minor(dev);
204 struct drm_device *drm_dev = dminor->dev; 204 struct drm_device *drm_dev = dminor->dev;
205 struct drm_i915_private *dev_priv = drm_dev->dev_private; 205 struct drm_i915_private *dev_priv = drm_dev->dev_private;
206 struct intel_context *ctx; 206 struct i915_gem_context *ctx;
207 u32 *temp = NULL; /* Just here to make handling failures easy */ 207 u32 *temp = NULL; /* Just here to make handling failures easy */
208 int slice = (int)(uintptr_t)attr->private; 208 int slice = (int)(uintptr_t)attr->private;
209 int ret; 209 int ret;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 20b2e4039792..6768db032f84 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -734,12 +734,12 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
734 * the context. 734 * the context.
735 */ 735 */
736DECLARE_EVENT_CLASS(i915_context, 736DECLARE_EVENT_CLASS(i915_context,
737 TP_PROTO(struct intel_context *ctx), 737 TP_PROTO(struct i915_gem_context *ctx),
738 TP_ARGS(ctx), 738 TP_ARGS(ctx),
739 739
740 TP_STRUCT__entry( 740 TP_STRUCT__entry(
741 __field(u32, dev) 741 __field(u32, dev)
742 __field(struct intel_context *, ctx) 742 __field(struct i915_gem_context *, ctx)
743 __field(struct i915_address_space *, vm) 743 __field(struct i915_address_space *, vm)
744 ), 744 ),
745 745
@@ -754,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context,
754) 754)
755 755
756DEFINE_EVENT(i915_context, i915_context_create, 756DEFINE_EVENT(i915_context, i915_context_create,
757 TP_PROTO(struct intel_context *ctx), 757 TP_PROTO(struct i915_gem_context *ctx),
758 TP_ARGS(ctx) 758 TP_ARGS(ctx)
759); 759);
760 760
761DEFINE_EVENT(i915_context, i915_context_free, 761DEFINE_EVENT(i915_context, i915_context_free,
762 TP_PROTO(struct intel_context *ctx), 762 TP_PROTO(struct i915_gem_context *ctx),
763 TP_ARGS(ctx) 763 TP_ARGS(ctx)
764); 764);
765 765
@@ -771,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free,
771 * called only if full ppgtt is enabled. 771 * called only if full ppgtt is enabled.
772 */ 772 */
773TRACE_EVENT(switch_mm, 773TRACE_EVENT(switch_mm,
774 TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to), 774 TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
775 775
776 TP_ARGS(engine, to), 776 TP_ARGS(engine, to),
777 777
778 TP_STRUCT__entry( 778 TP_STRUCT__entry(
779 __field(u32, ring) 779 __field(u32, ring)
780 __field(struct intel_context *, to) 780 __field(struct i915_gem_context *, to)
781 __field(struct i915_address_space *, vm) 781 __field(struct i915_address_space *, vm)
782 __field(u32, dev) 782 __field(u32, dev)
783 ), 783 ),
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 8b68c4882fba..713a02db378a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
139 else 139 else
140 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; 140 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
141 141
142 panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
143 dvo_timing->himage_lo;
144 panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
145 dvo_timing->vimage_lo;
146
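Each dimension is split across two VBT bytes, high byte first. A worked example with hypothetical values:

	u8 himage_hi = 0x01, himage_lo = 0x2c;		/* hypothetical VBT bytes */
	int width_mm = (himage_hi << 8) | himage_lo;	/* == 300 mm */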
142 /* Some VBTs have bogus h/vtotal values */ 147 /* Some VBTs have bogus h/vtotal values */
143 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 148 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
144 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; 149 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -213,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
213 218
214 dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; 219 dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
215 220
216 ret = intel_opregion_get_panel_type(dev_priv->dev); 221 ret = intel_opregion_get_panel_type(dev_priv);
217 if (ret >= 0) { 222 if (ret >= 0) {
218 WARN_ON(ret > 0xf); 223 WARN_ON(ret > 0xf);
219 panel_type = ret; 224 panel_type = ret;
@@ -1206,7 +1211,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1206 } 1211 }
1207 if (bdb->version < 106) { 1212 if (bdb->version < 106) {
1208 expected_size = 22; 1213 expected_size = 22;
1209 } else if (bdb->version < 109) { 1214 } else if (bdb->version < 111) {
1210 expected_size = 27; 1215 expected_size = 27;
1211 } else if (bdb->version < 195) { 1216 } else if (bdb->version < 195) {
1212 BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); 1217 BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3fbb6fc66451..622968161ac7 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -839,7 +839,7 @@ void intel_crt_init(struct drm_device *dev)
839 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 839 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
840 840
841 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, 841 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
842 DRM_MODE_ENCODER_DAC, NULL); 842 DRM_MODE_ENCODER_DAC, "CRT");
843 843
844 intel_connector_attach_encoder(intel_connector, &crt->base); 844 intel_connector_attach_encoder(intel_connector, &crt->base);
845 845
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index c454744dda0b..022b41d422dc 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2347,7 +2347,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
2347 encoder = &intel_encoder->base; 2347 encoder = &intel_encoder->base;
2348 2348
2349 drm_encoder_init(dev, encoder, &intel_ddi_funcs, 2349 drm_encoder_init(dev, encoder, &intel_ddi_funcs,
2350 DRM_MODE_ENCODER_TMDS, NULL); 2350 DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
2351 2351
2352 intel_encoder->compute_config = intel_ddi_compute_config; 2352 intel_encoder->compute_config = intel_ddi_compute_config;
2353 intel_encoder->enable = intel_enable_ddi; 2353 intel_encoder->enable = intel_enable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3f8987b7ee48..60cba1956c0d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -123,6 +123,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc);
123static void intel_modeset_setup_hw_state(struct drm_device *dev); 123static void intel_modeset_setup_hw_state(struct drm_device *dev);
124static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 124static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
125static int ilk_max_pixel_rate(struct drm_atomic_state *state); 125static int ilk_max_pixel_rate(struct drm_atomic_state *state);
126static int broxton_calc_cdclk(int max_pixclk);
126 127
127struct intel_limit { 128struct intel_limit {
128 struct { 129 struct {
@@ -4277,8 +4278,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
4277 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); 4278 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4278 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4279 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4279 4280
4280 DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", 4281 DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
4281 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); 4282 intel_crtc->base.base.id, intel_crtc->base.name,
4283 intel_crtc->pipe, SKL_CRTC_INDEX);
4282 4284
4283 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4285 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4284 &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), 4286 &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
@@ -4308,9 +4310,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4308 4310
4309 bool force_detach = !fb || !plane_state->visible; 4311 bool force_detach = !fb || !plane_state->visible;
4310 4312
4311 DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n", 4313 DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
4312 intel_plane->base.base.id, intel_crtc->pipe, 4314 intel_plane->base.base.id, intel_plane->base.name,
4313 drm_plane_index(&intel_plane->base)); 4315 intel_crtc->pipe, drm_plane_index(&intel_plane->base));
4314 4316
4315 ret = skl_update_scaler(crtc_state, force_detach, 4317 ret = skl_update_scaler(crtc_state, force_detach,
4316 drm_plane_index(&intel_plane->base), 4318 drm_plane_index(&intel_plane->base),
@@ -4326,8 +4328,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4326 4328
4327 /* check colorkey */ 4329 /* check colorkey */
4328 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { 4330 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4329 DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed", 4331 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4330 intel_plane->base.base.id); 4332 intel_plane->base.base.id,
4333 intel_plane->base.name);
4331 return -EINVAL; 4334 return -EINVAL;
4332 } 4335 }
4333 4336
@@ -4346,8 +4349,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4346 case DRM_FORMAT_VYUY: 4349 case DRM_FORMAT_VYUY:
4347 break; 4350 break;
4348 default: 4351 default:
4349 DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n", 4352 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
4350 intel_plane->base.base.id, fb->base.id, fb->pixel_format); 4353 intel_plane->base.base.id, intel_plane->base.name,
4354 fb->base.id, fb->pixel_format);
4351 return -EINVAL; 4355 return -EINVAL;
4352 } 4356 }
4353 4357
@@ -5265,21 +5269,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5265 return max_cdclk_freq*90/100; 5269 return max_cdclk_freq*90/100;
5266} 5270}
5267 5271
5272static int skl_calc_cdclk(int max_pixclk, int vco);
5273
5268static void intel_update_max_cdclk(struct drm_device *dev) 5274static void intel_update_max_cdclk(struct drm_device *dev)
5269{ 5275{
5270 struct drm_i915_private *dev_priv = dev->dev_private; 5276 struct drm_i915_private *dev_priv = dev->dev_private;
5271 5277
5272 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5278 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5273 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 5279 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5280 int max_cdclk, vco;
5281
5282 vco = dev_priv->skl_preferred_vco_freq;
5283 WARN_ON(vco != 8100000 && vco != 8640000);
5274 5284
5285 /*
5286 * Use the lower (vco 8640) cdclk values as a
5287 * first guess. skl_calc_cdclk() will correct it
5288 * if the preferred vco is 8100 instead.
5289 */
5275 if (limit == SKL_DFSM_CDCLK_LIMIT_675) 5290 if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5276 dev_priv->max_cdclk_freq = 675000; 5291 max_cdclk = 617143;
5277 else if (limit == SKL_DFSM_CDCLK_LIMIT_540) 5292 else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5278 dev_priv->max_cdclk_freq = 540000; 5293 max_cdclk = 540000;
5279 else if (limit == SKL_DFSM_CDCLK_LIMIT_450) 5294 else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5280 dev_priv->max_cdclk_freq = 450000; 5295 max_cdclk = 432000;
5281 else 5296 else
5282 dev_priv->max_cdclk_freq = 337500; 5297 max_cdclk = 308571;
5298
5299 dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
5283 } else if (IS_BROXTON(dev)) { 5300 } else if (IS_BROXTON(dev)) {
5284 dev_priv->max_cdclk_freq = 624000; 5301 dev_priv->max_cdclk_freq = 624000;
5285 } else if (IS_BROADWELL(dev)) { 5302 } else if (IS_BROADWELL(dev)) {
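To see the "first guess" correction at work with illustrative numbers: if the fuses report the 675 MHz limit but the preferred VCO is 8100, the guess of 617143 gets stepped up past the 540000 threshold, while with an 8640 VCO it is already a valid frequency (per the tables in skl_calc_cdclk() below):

	skl_calc_cdclk(617143, 8100000);	/* -> 675000 (corrected up) */
	skl_calc_cdclk(617143, 8640000);	/* -> 617143 (already valid) */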
@@ -5320,8 +5337,14 @@ static void intel_update_cdclk(struct drm_device *dev)
5320 struct drm_i915_private *dev_priv = dev->dev_private; 5337 struct drm_i915_private *dev_priv = dev->dev_private;
5321 5338
5322 dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); 5339 dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5323 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", 5340
5324 dev_priv->cdclk_freq); 5341 if (INTEL_GEN(dev_priv) >= 9)
5342 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
5343 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
5344 dev_priv->cdclk_pll.ref);
5345 else
5346 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5347 dev_priv->cdclk_freq);
5325 5348
5326 /* 5349 /*
5327 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): 5350 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
@@ -5331,9 +5354,6 @@ static void intel_update_cdclk(struct drm_device *dev)
5331 */ 5354 */
5332 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5355 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5333 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); 5356 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5334
5335 if (dev_priv->max_cdclk_freq == 0)
5336 intel_update_max_cdclk(dev);
5337} 5357}
5338 5358
5339/* convert from kHz to .1 fixpoint MHz with -1MHz offset */ 5359/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
@@ -5342,51 +5362,93 @@ static int skl_cdclk_decimal(int cdclk)
5342 return DIV_ROUND_CLOSEST(cdclk - 1000, 500); 5362 return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
5343} 5363}
5344 5364
5345static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) 5365static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
5346{ 5366{
5347 uint32_t divider; 5367 int ratio;
5348 uint32_t ratio; 5368
5349 uint32_t current_cdclk; 5369 if (cdclk == dev_priv->cdclk_pll.ref)
5350 int ret; 5370 return 0;
5351 5371
5352 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5353 switch (cdclk) { 5372 switch (cdclk) {
5373 default:
5374 MISSING_CASE(cdclk);
5354 case 144000: 5375 case 144000:
5376 case 288000:
5377 case 384000:
5378 case 576000:
5379 ratio = 60;
5380 break;
5381 case 624000:
5382 ratio = 65;
5383 break;
5384 }
5385
5386 return dev_priv->cdclk_pll.ref * ratio;
5387}
5388
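The ratios above combine with the 19.2 MHz reference and the cd2x divider chosen in broxton_set_cdclk() below; worked numbers as a sanity check:

	/* vco = ref * ratio; cdclk = vco / 2 / div{1,1.5,2,4}
	 * ratio 60: vco = 19200 * 60 = 1152000 kHz
	 *   576000 = /2/1, 384000 = /2/1.5, 288000 = /2/2, 144000 = /2/4
	 * ratio 65: vco = 19200 * 65 = 1248000 kHz; 624000 = /2/1
	 */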
5389static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
5390{
5391 I915_WRITE(BXT_DE_PLL_ENABLE, 0);
5392
5393 /* Timeout 200us */
5394 if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
5395 DRM_ERROR("timeout waiting for DE PLL unlock\n");
5396
5397 dev_priv->cdclk_pll.vco = 0;
5398}
5399
5400static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
5401{
5402 int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
5403 u32 val;
5404
5405 val = I915_READ(BXT_DE_PLL_CTL);
5406 val &= ~BXT_DE_PLL_RATIO_MASK;
5407 val |= BXT_DE_PLL_RATIO(ratio);
5408 I915_WRITE(BXT_DE_PLL_CTL, val);
5409
5410 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5411
5412 /* Timeout 200us */
5413 if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
5414 DRM_ERROR("timeout waiting for DE PLL lock\n");
5415
5416 dev_priv->cdclk_pll.vco = vco;
5417}
5418
5419static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5420{
5421 u32 val, divider;
5422 int vco, ret;
5423
5424 vco = bxt_de_pll_vco(dev_priv, cdclk);
5425
5426 DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5427
5428 /* cdclk = vco / 2 / div{1,1.5,2,4} */
5429 switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
5430 case 8:
5355 divider = BXT_CDCLK_CD2X_DIV_SEL_4; 5431 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5356 ratio = BXT_DE_PLL_RATIO(60);
5357 break; 5432 break;
5358 case 288000: 5433 case 4:
5359 divider = BXT_CDCLK_CD2X_DIV_SEL_2; 5434 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5360 ratio = BXT_DE_PLL_RATIO(60);
5361 break; 5435 break;
5362 case 384000: 5436 case 3:
5363 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; 5437 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5364 ratio = BXT_DE_PLL_RATIO(60);
5365 break; 5438 break;
5366 case 576000: 5439 case 2:
5367 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5368 ratio = BXT_DE_PLL_RATIO(60);
5369 break;
5370 case 624000:
5371 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5440 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5372 ratio = BXT_DE_PLL_RATIO(65);
5373 break;
5374 case 19200:
5375 /*
5376 * Bypass frequency with DE PLL disabled. Init ratio, divider
5377 * to suppress GCC warning.
5378 */
5379 ratio = 0;
5380 divider = 0;
5381 break; 5441 break;
5382 default: 5442 default:
5383 DRM_ERROR("unsupported CDCLK freq %d", cdclk); 5443 WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
5444 WARN_ON(vco != 0);
5384 5445
5385 return; 5446 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5447 break;
5386 } 5448 }
5387 5449
5388 mutex_lock(&dev_priv->rps.hw_lock);
5389 /* Inform power controller of upcoming frequency change */ 5450 /* Inform power controller of upcoming frequency change */
5451 mutex_lock(&dev_priv->rps.hw_lock);
5390 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5452 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5391 0x80000000); 5453 0x80000000);
5392 mutex_unlock(&dev_priv->rps.hw_lock); 5454 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5397,52 +5459,26 @@ static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5397 return; 5459 return;
5398 } 5460 }
5399 5461
5400 current_cdclk = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; 5462 if (dev_priv->cdclk_pll.vco != 0 &&
5401 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ 5463 dev_priv->cdclk_pll.vco != vco)
5402 current_cdclk = current_cdclk * 500 + 1000; 5464 bxt_de_pll_disable(dev_priv);
5403 5465
5466 if (dev_priv->cdclk_pll.vco != vco)
5467 bxt_de_pll_enable(dev_priv, vco);
5468
5469 val = divider | skl_cdclk_decimal(cdclk);
5404 /* 5470 /*
5405 * DE PLL has to be disabled when 5471 * FIXME if only the cd2x divider needs changing, it could be done
5406 * - setting to 19.2MHz (bypass, PLL isn't used) 5472 * without shutting off the pipe (if only one pipe is active).
5407 * - before setting to 624MHz (PLL needs toggling) 5473 */
5408 * - before setting to any frequency from 624MHz (PLL needs toggling) 5474 val |= BXT_CDCLK_CD2X_PIPE_NONE;
5409 */ 5475 /*
5410 if (cdclk == 19200 || cdclk == 624000 || 5476 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5411 current_cdclk == 624000) { 5477 * enable otherwise.
5412 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); 5478 */
5413 /* Timeout 200us */ 5479 if (cdclk >= 500000)
5414 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), 5480 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5415 1)) 5481 I915_WRITE(CDCLK_CTL, val);
5416 DRM_ERROR("timout waiting for DE PLL unlock\n");
5417 }
5418
5419 if (cdclk != 19200) {
5420 uint32_t val;
5421
5422 val = I915_READ(BXT_DE_PLL_CTL);
5423 val &= ~BXT_DE_PLL_RATIO_MASK;
5424 val |= ratio;
5425 I915_WRITE(BXT_DE_PLL_CTL, val);
5426
5427 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5428 /* Timeout 200us */
5429 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5430 DRM_ERROR("timeout waiting for DE PLL lock\n");
5431
5432 val = divider | skl_cdclk_decimal(cdclk);
5433 /*
5434 * FIXME if only the cd2x divider needs changing, it could be done
5435 * without shutting off the pipe (if only one pipe is active).
5436 */
5437 val |= BXT_CDCLK_CD2X_PIPE_NONE;
5438 /*
5439 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5440 * enable otherwise.
5441 */
5442 if (cdclk >= 500000)
5443 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5444 I915_WRITE(CDCLK_CTL, val);
5445 }
5446 5482
5447 mutex_lock(&dev_priv->rps.hw_lock); 5483 mutex_lock(&dev_priv->rps.hw_lock);
5448 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5484 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
@@ -5458,114 +5494,155 @@ static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5458 intel_update_cdclk(dev_priv->dev); 5494 intel_update_cdclk(dev_priv->dev);
5459} 5495}
5460 5496
5461static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv) 5497static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
5462{ 5498{
5463 if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE)) 5499 u32 cdctl, expected;
5464 return false;
5465 5500
5466 /* TODO: Check for a valid CDCLK rate */ 5501 intel_update_cdclk(dev_priv->dev);
5467 5502
5468 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) { 5503 if (dev_priv->cdclk_pll.vco == 0 ||
5469 DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n"); 5504 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5505 goto sanitize;
5470 5506
5471 return false; 5507 /* DPLL okay; verify the cdclock
5472 } 5508 *
5509 * Some BIOS versions leave an incorrect decimal frequency value and
5510 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
5511 * so sanitize this register.
5512 */
5513 cdctl = I915_READ(CDCLK_CTL);
5514 /*
 5515 * Let's ignore the pipe field, since the BIOS could have configured the
 5516 * dividers either synched to an active pipe or asynchronously
5517 * (PIPE_NONE).
5518 */
5519 cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
5473 5520
5474 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) { 5521 expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
5475 DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n"); 5522 skl_cdclk_decimal(dev_priv->cdclk_freq);
5523 /*
5524 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5525 * enable otherwise.
5526 */
5527 if (dev_priv->cdclk_freq >= 500000)
5528 expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5476 5529
5477 return false; 5530 if (cdctl == expected)
5478 } 5531 /* All well; nothing to sanitize */
5532 return;
5479 5533
5480 return true; 5534sanitize:
5481} 5535 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5482 5536
5483bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv) 5537 /* force cdclk programming */
5484{ 5538 dev_priv->cdclk_freq = 0;
5485 return broxton_cdclk_is_enabled(dev_priv); 5539
5540 /* force full PLL disable + enable */
5541 dev_priv->cdclk_pll.vco = -1;
5486} 5542}
5487 5543
5488void broxton_init_cdclk(struct drm_i915_private *dev_priv) 5544void broxton_init_cdclk(struct drm_i915_private *dev_priv)
5489{ 5545{
5490 /* check if cd clock is enabled */ 5546 bxt_sanitize_cdclk(dev_priv);
5491 if (broxton_cdclk_is_enabled(dev_priv)) {
5492 DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
5493 return;
5494 }
5495 5547
5496 DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n"); 5548 if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
5549 return;
5497 5550
5498 /* 5551 /*
5499 * FIXME: 5552 * FIXME:
5500 * - The initial CDCLK needs to be read from VBT. 5553 * - The initial CDCLK needs to be read from VBT.
5501 * Need to make this change after VBT has changes for BXT. 5554 * Need to make this change after VBT has changes for BXT.
5502 * - check if setting the max (or any) cdclk freq is really necessary
5503 * here, it belongs to modeset time
5504 */ 5555 */
5505 broxton_set_cdclk(dev_priv, 624000); 5556 broxton_set_cdclk(dev_priv, broxton_calc_cdclk(0));
5506
5507 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5508 POSTING_READ(DBUF_CTL);
5509
5510 udelay(10);
5511
5512 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5513 DRM_ERROR("DBuf power enable timeout!\n");
5514} 5557}
5515 5558
5516void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) 5559void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
5517{ 5560{
5518 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); 5561 broxton_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
5519 POSTING_READ(DBUF_CTL); 5562}
5520 5563
5521 udelay(10); 5564static int skl_calc_cdclk(int max_pixclk, int vco)
5565{
5566 if (vco == 8640000) {
5567 if (max_pixclk > 540000)
5568 return 617143;
5569 else if (max_pixclk > 432000)
5570 return 540000;
5571 else if (max_pixclk > 308571)
5572 return 432000;
5573 else
5574 return 308571;
5575 } else {
5576 if (max_pixclk > 540000)
5577 return 675000;
5578 else if (max_pixclk > 450000)
5579 return 540000;
5580 else if (max_pixclk > 337500)
5581 return 450000;
5582 else
5583 return 337500;
5584 }
5585}
5522 5586
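A couple of worked calls against the tables above (pixel rates illustrative):

	skl_calc_cdclk(533280, 8640000);	/* > 432000 -> 540000 kHz */
	skl_calc_cdclk(533280, 8100000);	/* > 450000 -> 540000 kHz */
	skl_calc_cdclk(0, 8640000);		/* floor for this VCO: 308571 kHz */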
5523 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5587static void
5524 DRM_ERROR("DBuf power disable timeout!\n"); 5588skl_dpll0_update(struct drm_i915_private *dev_priv)
5589{
5590 u32 val;
5525 5591
5526 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ 5592 dev_priv->cdclk_pll.ref = 24000;
5527 broxton_set_cdclk(dev_priv, 19200); 5593 dev_priv->cdclk_pll.vco = 0;
5528}
5529 5594
5530static const struct skl_cdclk_entry { 5595 val = I915_READ(LCPLL1_CTL);
5531 unsigned int freq; 5596 if ((val & LCPLL_PLL_ENABLE) == 0)
5532 unsigned int vco; 5597 return;
5533} skl_cdclk_frequencies[] = {
5534 { .freq = 308570, .vco = 8640 },
5535 { .freq = 337500, .vco = 8100 },
5536 { .freq = 432000, .vco = 8640 },
5537 { .freq = 450000, .vco = 8100 },
5538 { .freq = 540000, .vco = 8100 },
5539 { .freq = 617140, .vco = 8640 },
5540 { .freq = 675000, .vco = 8100 },
5541};
5542 5598
5543static unsigned int skl_cdclk_get_vco(unsigned int freq) 5599 if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
5544{ 5600 return;
5545 unsigned int i; 5601
5602 val = I915_READ(DPLL_CTRL1);
5546 5603
5547 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { 5604 if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
5548 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; 5605 DPLL_CTRL1_SSC(SKL_DPLL0) |
5606 DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
5607 DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
5608 return;
5549 5609
5550 if (e->freq == freq) 5610 switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
5551 return e->vco; 5611 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
5612 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
5613 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
5614 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
5615 dev_priv->cdclk_pll.vco = 8100000;
5616 break;
5617 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
5618 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
5619 dev_priv->cdclk_pll.vco = 8640000;
5620 break;
5621 default:
5622 MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5623 break;
5552 } 5624 }
5625}
5626
5627void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
5628{
5629 bool changed = dev_priv->skl_preferred_vco_freq != vco;
5553 5630
5554 return 8100; 5631 dev_priv->skl_preferred_vco_freq = vco;
5632
5633 if (changed)
5634 intel_update_max_cdclk(dev_priv->dev);
5555} 5635}
5556 5636
5557static void 5637static void
5558skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) 5638skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
5559{ 5639{
5560 int min_cdclk; 5640 int min_cdclk = skl_calc_cdclk(0, vco);
5561 u32 val; 5641 u32 val;
5562 5642
5563 /* select the minimum CDCLK before enabling DPLL 0 */ 5643 WARN_ON(vco != 8100000 && vco != 8640000);
5564 if (vco == 8640)
5565 min_cdclk = 308570;
5566 else
5567 min_cdclk = 337500;
5568 5644
5645 /* select the minimum CDCLK before enabling DPLL 0 */
5569 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); 5646 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
5570 I915_WRITE(CDCLK_CTL, val); 5647 I915_WRITE(CDCLK_CTL, val);
5571 POSTING_READ(CDCLK_CTL); 5648 POSTING_READ(CDCLK_CTL);
@@ -5577,14 +5654,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
5577 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. 5654 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5578 * The modeset code is responsible for the selection of the exact link 5655 * The modeset code is responsible for the selection of the exact link
5579 * rate later on, with the constraint of choosing a frequency that 5656 * rate later on, with the constraint of choosing a frequency that
5580 * works with required_vco. 5657 * works with vco.
5581 */ 5658 */
5582 val = I915_READ(DPLL_CTRL1); 5659 val = I915_READ(DPLL_CTRL1);
5583 5660
5584 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | 5661 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5585 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5662 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5586 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 5663 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5587 if (vco == 8640) 5664 if (vco == 8640000)
5588 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 5665 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5589 SKL_DPLL0); 5666 SKL_DPLL0);
5590 else 5667 else
@@ -5598,6 +5675,11 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
5598 5675
5599 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) 5676 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5600 DRM_ERROR("DPLL0 not locked\n"); 5677 DRM_ERROR("DPLL0 not locked\n");
5678
5679 dev_priv->cdclk_pll.vco = vco;
5680
5681 /* We'll want to keep using the current vco from now on. */
5682 skl_set_preferred_cdclk_vco(dev_priv, vco);
5601} 5683}
5602 5684
5603static void 5685static void
@@ -5606,6 +5688,8 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
5606 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); 5688 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5607 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) 5689 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5608 DRM_ERROR("Couldn't disable DPLL0\n"); 5690 DRM_ERROR("Couldn't disable DPLL0\n");
5691
5692 dev_priv->cdclk_pll.vco = 0;
5609} 5693}
5610 5694
5611static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5695static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
@@ -5635,12 +5719,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5635 return false; 5719 return false;
5636} 5720}
5637 5721
5638static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) 5722static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
5639{ 5723{
5640 struct drm_device *dev = dev_priv->dev; 5724 struct drm_device *dev = dev_priv->dev;
5641 u32 freq_select, pcu_ack; 5725 u32 freq_select, pcu_ack;
5642 5726
5643 DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", cdclk); 5727 WARN_ON((cdclk == 24000) != (vco == 0));
5728
5729 DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5644 5730
5645 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { 5731 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5646 DRM_ERROR("failed to inform PCU about cdclk change\n"); 5732 DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5658,19 +5744,26 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5658 freq_select = CDCLK_FREQ_540; 5744 freq_select = CDCLK_FREQ_540;
5659 pcu_ack = 2; 5745 pcu_ack = 2;
5660 break; 5746 break;
5661 case 308570: 5747 case 308571:
5662 case 337500: 5748 case 337500:
5663 default: 5749 default:
5664 freq_select = CDCLK_FREQ_337_308; 5750 freq_select = CDCLK_FREQ_337_308;
5665 pcu_ack = 0; 5751 pcu_ack = 0;
5666 break; 5752 break;
5667 case 617140: 5753 case 617143:
5668 case 675000: 5754 case 675000:
5669 freq_select = CDCLK_FREQ_675_617; 5755 freq_select = CDCLK_FREQ_675_617;
5670 pcu_ack = 3; 5756 pcu_ack = 3;
5671 break; 5757 break;
5672 } 5758 }
5673 5759
5760 if (dev_priv->cdclk_pll.vco != 0 &&
5761 dev_priv->cdclk_pll.vco != vco)
5762 skl_dpll0_disable(dev_priv);
5763
5764 if (dev_priv->cdclk_pll.vco != vco)
5765 skl_dpll0_enable(dev_priv, vco);
5766
5674 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); 5767 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
5675 POSTING_READ(CDCLK_CTL); 5768 POSTING_READ(CDCLK_CTL);
5676 5769
@@ -5682,49 +5775,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5682 intel_update_cdclk(dev); 5775 intel_update_cdclk(dev);
5683} 5776}
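
Note: the ordering in the hunk above is deliberate as far as one can tell from the code: skl_dpll0_enable() first programs the minimum cdclk for the target VCO and waits for lock, so the final CDCLK_CTL write always lands on a locked PLL. The WARN_ON covers the one illegal combination: 24000 kHz is the bypass (reference) frequency and only makes sense with DPLL0 off (vco == 0), which is exactly how skl_uninit_cdclk() now calls this function.
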
5684 5777
5778static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
5779
5685void skl_uninit_cdclk(struct drm_i915_private *dev_priv) 5780void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5686{ 5781{
5687 /* disable DBUF power */ 5782 skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
5688 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5689 POSTING_READ(DBUF_CTL);
5690
5691 udelay(10);
5692
5693 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5694 DRM_ERROR("DBuf power disable timeout\n");
5695
5696 skl_dpll0_disable(dev_priv);
5697} 5783}
5698 5784
5699void skl_init_cdclk(struct drm_i915_private *dev_priv) 5785void skl_init_cdclk(struct drm_i915_private *dev_priv)
5700{ 5786{
5701 unsigned int vco; 5787 int cdclk, vco;
5702 5788
5703 /* DPLL0 not enabled (happens on early BIOS versions) */ 5789 skl_sanitize_cdclk(dev_priv);
5704 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5705 /* enable DPLL0 */
5706 vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5707 skl_dpll0_enable(dev_priv, vco);
5708 }
5709 5790
5710 /* set CDCLK to the frequency the BIOS chose */ 5791 if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
5711 skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); 5792 /*
5712 5793 * Use the current vco as our initial
5713 /* enable DBUF power */ 5794 * guess as to what the preferred vco is.
5714 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); 5795 */
5715 POSTING_READ(DBUF_CTL); 5796 if (dev_priv->skl_preferred_vco_freq == 0)
5797 skl_set_preferred_cdclk_vco(dev_priv,
5798 dev_priv->cdclk_pll.vco);
5799 return;
5800 }
5716 5801
5717 udelay(10); 5802 vco = dev_priv->skl_preferred_vco_freq;
5803 if (vco == 0)
5804 vco = 8100000;
5805 cdclk = skl_calc_cdclk(0, vco);
5718 5806
5719 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5807 skl_set_cdclk(dev_priv, cdclk, vco);
5720 DRM_ERROR("DBuf power enable timeout\n");
5721} 5808}
5722 5809
5723int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) 5810static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5724{ 5811{
5725 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 5812 uint32_t cdctl, expected;
5726 uint32_t cdctl = I915_READ(CDCLK_CTL);
5727 int freq = dev_priv->skl_boot_cdclk;
5728 5813
5729 /* 5814 /*
5730 * check if the pre-os initialized the display 5815 * check if the pre-os initialized the display
@@ -5734,8 +5819,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5734 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) 5819 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5735 goto sanitize; 5820 goto sanitize;
5736 5821
5822 intel_update_cdclk(dev_priv->dev);
5737 /* Is PLL enabled and locked ? */ 5823 /* Is PLL enabled and locked ? */
5738 if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK))) 5824 if (dev_priv->cdclk_pll.vco == 0 ||
5825 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5739 goto sanitize; 5826 goto sanitize;
5740 5827
5741 /* DPLL okay; verify the cdclock 5828 /* DPLL okay; verify the cdclock
@@ -5744,19 +5831,20 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5744 * decimal part is programmed wrong by the BIOS when the pre-os does not 5831 * decimal part is programmed wrong by the BIOS when the pre-os does not
5745 * enable display. Verify the same as well. 5832 * enable display. Verify the same as well.
5746 */ 5833 */
5747 if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) 5834 cdctl = I915_READ(CDCLK_CTL);
5835 expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
5836 skl_cdclk_decimal(dev_priv->cdclk_freq);
5837 if (cdctl == expected)
5748 /* All well; nothing to sanitize */ 5838 /* All well; nothing to sanitize */
5749 return false; 5839 return;
5840
5750sanitize: 5841sanitize:
5751 /* 5842 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5752 * As of now initialize with max cdclk till
5753 * we get dynamic cdclk support
5754 * */
5755 dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5756 skl_init_cdclk(dev_priv);
5757 5843
5758 /* we did have to sanitize */ 5844 /* force cdclk programming */
5759 return true; 5845 dev_priv->cdclk_freq = 0;
5846 /* force full PLL disable + enable */
5847 dev_priv->cdclk_pll.vco = -1;
5760} 5848}
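
The sentinel values here deserve a note: cdclk_freq = 0 never matches a real frequency, so the next commit always sees a cdclk change, and vco = -1 matches neither 0 nor a valid VCO, so skl_set_cdclk() above takes both the DPLL0 disable and enable branches. A tiny standalone sketch (hypothetical, mirroring just those two comparisons) makes the effect visible:

/* Hypothetical demo of the PLL-cycle decision in skl_set_cdclk(). */
#include <stdio.h>
#include <stdbool.h>

static void plan(int cur_vco, int new_vco)
{
	/* mirrors the two conditions added to skl_set_cdclk() */
	bool disable = cur_vco != 0 && cur_vco != new_vco;
	bool enable = cur_vco != new_vco;

	printf("cur=%8d new=%d -> disable=%d enable=%d\n",
	       cur_vco, new_vco, disable, enable);
}

int main(void)
{
	plan(8100000, 8100000);	/* steady state: no PLL work */
	plan(8100000, 8640000);	/* VCO switch: disable + enable */
	plan(0, 8100000);	/* PLL was off: enable only */
	plan(-1, 8100000);	/* sanitize sentinel: full cycle */
	return 0;
}
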
5761 5849
5762/* Adjust CDclk dividers to allow high res or save power if possible */ 5850/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -5898,10 +5986,6 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5898 5986
5899static int broxton_calc_cdclk(int max_pixclk) 5987static int broxton_calc_cdclk(int max_pixclk)
5900{ 5988{
5901 /*
5902 * FIXME:
5903 * - set 19.2MHz bypass frequency if there are no active pipes
5904 */
5905 if (max_pixclk > 576000) 5989 if (max_pixclk > 576000)
5906 return 624000; 5990 return 624000;
5907 else if (max_pixclk > 384000) 5991 else if (max_pixclk > 384000)
@@ -6242,8 +6326,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6242 6326
6243 dev_priv->display.crtc_disable(crtc); 6327 dev_priv->display.crtc_disable(crtc);
6244 6328
6245 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", 6329 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6246 crtc->base.id); 6330 crtc->base.id, crtc->name);
6247 6331
6248 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 6332 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6249 crtc->state->active = false; 6333 crtc->state->active = false;
@@ -6543,10 +6627,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6543 struct drm_device *dev = crtc->base.dev; 6627 struct drm_device *dev = crtc->base.dev;
6544 struct drm_i915_private *dev_priv = dev->dev_private; 6628 struct drm_i915_private *dev_priv = dev->dev_private;
6545 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6629 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6630 int clock_limit = dev_priv->max_dotclk_freq;
6546 6631
6547 /* FIXME should check pixel clock limits on all platforms */
6548 if (INTEL_INFO(dev)->gen < 4) { 6632 if (INTEL_INFO(dev)->gen < 4) {
6549 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6633 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6550 6634
6551 /* 6635 /*
6552 * Enable double wide mode when the dot clock 6636 * Enable double wide mode when the dot clock
@@ -6554,16 +6638,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6554 */ 6638 */
6555 if (intel_crtc_supports_double_wide(crtc) && 6639 if (intel_crtc_supports_double_wide(crtc) &&
6556 adjusted_mode->crtc_clock > clock_limit) { 6640 adjusted_mode->crtc_clock > clock_limit) {
6557 clock_limit *= 2; 6641 clock_limit = dev_priv->max_dotclk_freq;
6558 pipe_config->double_wide = true; 6642 pipe_config->double_wide = true;
6559 } 6643 }
6644 }
6560 6645
6561 if (adjusted_mode->crtc_clock > clock_limit) { 6646 if (adjusted_mode->crtc_clock > clock_limit) {
6562 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6647 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6563 adjusted_mode->crtc_clock, clock_limit, 6648 adjusted_mode->crtc_clock, clock_limit,
6564 yesno(pipe_config->double_wide)); 6649 yesno(pipe_config->double_wide));
6565 return -EINVAL; 6650 return -EINVAL;
6566 }
6567 } 6651 }
6568 6652
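
Worked example for the gen < 4 branch: with a 400000 kHz max cdclk the single-wide limit is 400000 * 9 / 10 = 360000 kHz, so a mode with a 380000 kHz dot clock only passes once double wide raises the limit to max_dotclk_freq. The restructuring also makes gen4+ hit the clock_limit check (now seeded with max_dotclk_freq), closing the removed FIXME about pixel clock limits only being enforced on gen < 4.
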
6569 /* 6653 /*
@@ -6595,76 +6679,98 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6595static int skylake_get_display_clock_speed(struct drm_device *dev) 6679static int skylake_get_display_clock_speed(struct drm_device *dev)
6596{ 6680{
6597 struct drm_i915_private *dev_priv = to_i915(dev); 6681 struct drm_i915_private *dev_priv = to_i915(dev);
6598 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 6682 uint32_t cdctl;
6599 uint32_t cdctl = I915_READ(CDCLK_CTL);
6600 uint32_t linkrate;
6601 6683
6602 if (!(lcpll1 & LCPLL_PLL_ENABLE)) 6684 skl_dpll0_update(dev_priv);
6603 return 24000; /* 24MHz is the cd freq with NSSC ref */
6604 6685
6605 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) 6686 if (dev_priv->cdclk_pll.vco == 0)
6606 return 540000; 6687 return dev_priv->cdclk_pll.ref;
6607 6688
6608 linkrate = (I915_READ(DPLL_CTRL1) & 6689 cdctl = I915_READ(CDCLK_CTL);
6609 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6610 6690
6611 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || 6691 if (dev_priv->cdclk_pll.vco == 8640000) {
6612 linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6613 /* vco 8640 */
6614 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6692 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6615 case CDCLK_FREQ_450_432: 6693 case CDCLK_FREQ_450_432:
6616 return 432000; 6694 return 432000;
6617 case CDCLK_FREQ_337_308: 6695 case CDCLK_FREQ_337_308:
6618 return 308570; 6696 return 308571;
6697 case CDCLK_FREQ_540:
6698 return 540000;
6619 case CDCLK_FREQ_675_617: 6699 case CDCLK_FREQ_675_617:
6620 return 617140; 6700 return 617143;
6621 default: 6701 default:
6622 WARN(1, "Unknown cd freq selection\n"); 6702 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6623 } 6703 }
6624 } else { 6704 } else {
6625 /* vco 8100 */
6626 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6705 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6627 case CDCLK_FREQ_450_432: 6706 case CDCLK_FREQ_450_432:
6628 return 450000; 6707 return 450000;
6629 case CDCLK_FREQ_337_308: 6708 case CDCLK_FREQ_337_308:
6630 return 337500; 6709 return 337500;
6710 case CDCLK_FREQ_540:
6711 return 540000;
6631 case CDCLK_FREQ_675_617: 6712 case CDCLK_FREQ_675_617:
6632 return 675000; 6713 return 675000;
6633 default: 6714 default:
6634 WARN(1, "Unknown cd freq selection\n"); 6715 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6635 } 6716 }
6636 } 6717 }
6637 6718
6638 /* error case, do as if DPLL0 isn't enabled */ 6719 return dev_priv->cdclk_pll.ref;
6639 return 24000; 6720}
6721
6722static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
6723{
6724 u32 val;
6725
6726 dev_priv->cdclk_pll.ref = 19200;
6727 dev_priv->cdclk_pll.vco = 0;
6728
6729 val = I915_READ(BXT_DE_PLL_ENABLE);
6730 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
6731 return;
6732
6733 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
6734 return;
6735
6736 val = I915_READ(BXT_DE_PLL_CTL);
6737 dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
6738 dev_priv->cdclk_pll.ref;
6640} 6739}
6641 6740
6642static int broxton_get_display_clock_speed(struct drm_device *dev) 6741static int broxton_get_display_clock_speed(struct drm_device *dev)
6643{ 6742{
6644 struct drm_i915_private *dev_priv = to_i915(dev); 6743 struct drm_i915_private *dev_priv = to_i915(dev);
6645 uint32_t cdctl = I915_READ(CDCLK_CTL); 6744 u32 divider;
6646 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; 6745 int div, vco;
6647 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); 6746
6648 int cdclk; 6747 bxt_de_pll_update(dev_priv);
6649 6748
6650 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) 6749 vco = dev_priv->cdclk_pll.vco;
6651 return 19200; 6750 if (vco == 0)
6751 return dev_priv->cdclk_pll.ref;
6652 6752
6653 cdclk = 19200 * pll_ratio / 2; 6753 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
6654 6754
6655 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { 6755 switch (divider) {
6656 case BXT_CDCLK_CD2X_DIV_SEL_1: 6756 case BXT_CDCLK_CD2X_DIV_SEL_1:
6657 return cdclk; /* 576MHz or 624MHz */ 6757 div = 2;
6758 break;
6658 case BXT_CDCLK_CD2X_DIV_SEL_1_5: 6759 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6659 return cdclk * 2 / 3; /* 384MHz */ 6760 div = 3;
6761 break;
6660 case BXT_CDCLK_CD2X_DIV_SEL_2: 6762 case BXT_CDCLK_CD2X_DIV_SEL_2:
6661 return cdclk / 2; /* 288MHz */ 6763 div = 4;
6764 break;
6662 case BXT_CDCLK_CD2X_DIV_SEL_4: 6765 case BXT_CDCLK_CD2X_DIV_SEL_4:
6663 return cdclk / 4; /* 144MHz */ 6766 div = 8;
6767 break;
6768 default:
6769 MISSING_CASE(divider);
6770 return dev_priv->cdclk_pll.ref;
6664 } 6771 }
6665 6772
6666 /* error case, do as if DE PLL isn't enabled */ 6773 return DIV_ROUND_CLOSEST(vco, div);
6667 return 19200;
6668} 6774}
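
Worked example: bxt_de_pll_update() above computes vco = ratio * 19200, so the common ratio of 60 gives 1152000 kHz, and the CD2X divider then yields 576000 (div 2), 384000 (div 3), 288000 (div 4) or 144000 (div 8). Those are the same frequencies the deleted comments hard-coded, but now derived, with DIV_ROUND_CLOSEST() handling any non-integer quotient.
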
6669 6775
6670static int broadwell_get_display_clock_speed(struct drm_device *dev) 6776static int broadwell_get_display_clock_speed(struct drm_device *dev)
@@ -8255,12 +8361,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8255{ 8361{
8256 struct drm_i915_private *dev_priv = dev->dev_private; 8362 struct drm_i915_private *dev_priv = dev->dev_private;
8257 struct intel_encoder *encoder; 8363 struct intel_encoder *encoder;
8364 int i;
8258 u32 val, final; 8365 u32 val, final;
8259 bool has_lvds = false; 8366 bool has_lvds = false;
8260 bool has_cpu_edp = false; 8367 bool has_cpu_edp = false;
8261 bool has_panel = false; 8368 bool has_panel = false;
8262 bool has_ck505 = false; 8369 bool has_ck505 = false;
8263 bool can_ssc = false; 8370 bool can_ssc = false;
8371 bool using_ssc_source = false;
8264 8372
8265 /* We need to take the global config into account */ 8373 /* We need to take the global config into account */
8266 for_each_intel_encoder(dev, encoder) { 8374 for_each_intel_encoder(dev, encoder) {
@@ -8287,8 +8395,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8287 can_ssc = true; 8395 can_ssc = true;
8288 } 8396 }
8289 8397
8290 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", 8398 /* Check if any DPLLs are using the SSC source */
8291 has_panel, has_lvds, has_ck505); 8399 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8400 u32 temp = I915_READ(PCH_DPLL(i));
8401
8402 if (!(temp & DPLL_VCO_ENABLE))
8403 continue;
8404
8405 if ((temp & PLL_REF_INPUT_MASK) ==
8406 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8407 using_ssc_source = true;
8408 break;
8409 }
8410 }
8411
8412 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8413 has_panel, has_lvds, has_ck505, using_ssc_source);
8292 8414
8293 /* Ironlake: try to setup display ref clock before DPLL 8415 /* Ironlake: try to setup display ref clock before DPLL
8294 * enabling. This is only under driver's control after 8416 * enabling. This is only under driver's control after
@@ -8308,9 +8430,12 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8308 else 8430 else
8309 final |= DREF_NONSPREAD_SOURCE_ENABLE; 8431 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8310 8432
8311 final &= ~DREF_SSC_SOURCE_MASK;
8312 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8433 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8313 final &= ~DREF_SSC1_ENABLE; 8434
8435 if (!using_ssc_source) {
8436 final &= ~DREF_SSC_SOURCE_MASK;
8437 final &= ~DREF_SSC1_ENABLE;
8438 }
8314 8439
8315 if (has_panel) { 8440 if (has_panel) {
8316 final |= DREF_SSC_SOURCE_ENABLE; 8441 final |= DREF_SSC_SOURCE_ENABLE;
@@ -8373,7 +8498,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8373 POSTING_READ(PCH_DREF_CONTROL); 8498 POSTING_READ(PCH_DREF_CONTROL);
8374 udelay(200); 8499 udelay(200);
8375 } else { 8500 } else {
8376 DRM_DEBUG_KMS("Disabling SSC entirely\n"); 8501 DRM_DEBUG_KMS("Disabling CPU source output\n");
8377 8502
8378 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8503 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8379 8504
@@ -8384,16 +8509,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8384 POSTING_READ(PCH_DREF_CONTROL); 8509 POSTING_READ(PCH_DREF_CONTROL);
8385 udelay(200); 8510 udelay(200);
8386 8511
8387 /* Turn off the SSC source */ 8512 if (!using_ssc_source) {
8388 val &= ~DREF_SSC_SOURCE_MASK; 8513 DRM_DEBUG_KMS("Disabling SSC source\n");
8389 val |= DREF_SSC_SOURCE_DISABLE;
8390 8514
8391 /* Turn off SSC1 */ 8515 /* Turn off the SSC source */
8392 val &= ~DREF_SSC1_ENABLE; 8516 val &= ~DREF_SSC_SOURCE_MASK;
8517 val |= DREF_SSC_SOURCE_DISABLE;
8393 8518
8394 I915_WRITE(PCH_DREF_CONTROL, val); 8519 /* Turn off SSC1 */
8395 POSTING_READ(PCH_DREF_CONTROL); 8520 val &= ~DREF_SSC1_ENABLE;
8396 udelay(200); 8521
8522 I915_WRITE(PCH_DREF_CONTROL, val);
8523 POSTING_READ(PCH_DREF_CONTROL);
8524 udelay(200);
8525 }
8397 } 8526 }
8398 8527
8399 BUG_ON(val != final); 8528 BUG_ON(val != final);
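
Note: the using_ssc_source scan added above fixes an ordering hazard visible in the old lines: when the CPU eDP output was turned off, the SSC source was disabled unconditionally, even if a still-enabled PCH DPLL (PLL_REF_INPUT_MASK set to PLLB_REF_INPUT_SPREADSPECTRUMIN) was clocked from it. With the change, the SSC bits are only cleared when no DPLL references the source, and the debug messages now state precisely which of the two things is being disabled.
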
@@ -9719,6 +9848,47 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9719 broadwell_set_cdclk(dev, req_cdclk); 9848 broadwell_set_cdclk(dev, req_cdclk);
9720} 9849}
9721 9850
9851static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
9852{
9853 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9854 struct drm_i915_private *dev_priv = to_i915(state->dev);
9855 const int max_pixclk = ilk_max_pixel_rate(state);
9856 int vco = intel_state->cdclk_pll_vco;
9857 int cdclk;
9858
9859 /*
9860 * FIXME should also account for plane ratio
9861 * once 64bpp pixel formats are supported.
9862 */
9863 cdclk = skl_calc_cdclk(max_pixclk, vco);
9864
9865 /*
9866 * FIXME move the cdclk calculation to
9867 * compute_config() so we can fail gracefully.
9868 */
9869 if (cdclk > dev_priv->max_cdclk_freq) {
9870 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9871 cdclk, dev_priv->max_cdclk_freq);
9872 cdclk = dev_priv->max_cdclk_freq;
9873 }
9874
9875 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9876 if (!intel_state->active_crtcs)
9877 intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
9878
9879 return 0;
9880}
9881
9882static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9883{
9884 struct drm_i915_private *dev_priv = to_i915(old_state->dev);
9885 struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
9886 unsigned int req_cdclk = intel_state->dev_cdclk;
9887 unsigned int req_vco = intel_state->cdclk_pll_vco;
9888
9889 skl_set_cdclk(dev_priv, req_cdclk, req_vco);
9890}
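
Worked example of the calc/commit pair: with cdclk_pll_vco = 8640000 (as set by the eDP hunk in intel_dp.c further down) and a 400000 kHz max pixel clock, skl_modeset_calc_cdclk() stores cdclk = 432000; with no active crtcs, dev_cdclk instead drops to skl_calc_cdclk(0, 8640000) = 308571. skl_modeset_commit_cdclk() then hands both the frequency and the VCO to skl_set_cdclk(), so a VCO-only change is enough to trigger reprogramming.
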
9891
9722static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 9892static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9723 struct intel_crtc_state *crtc_state) 9893 struct intel_crtc_state *crtc_state)
9724{ 9894{
@@ -11765,12 +11935,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11765 struct drm_i915_private *dev_priv = to_i915(dev); 11935 struct drm_i915_private *dev_priv = to_i915(dev);
11766 struct intel_plane_state *old_plane_state = 11936 struct intel_plane_state *old_plane_state =
11767 to_intel_plane_state(plane->state); 11937 to_intel_plane_state(plane->state);
11768 int idx = intel_crtc->base.base.id, ret;
11769 bool mode_changed = needs_modeset(crtc_state); 11938 bool mode_changed = needs_modeset(crtc_state);
11770 bool was_crtc_enabled = crtc->state->active; 11939 bool was_crtc_enabled = crtc->state->active;
11771 bool is_crtc_enabled = crtc_state->active; 11940 bool is_crtc_enabled = crtc_state->active;
11772 bool turn_off, turn_on, visible, was_visible; 11941 bool turn_off, turn_on, visible, was_visible;
11773 struct drm_framebuffer *fb = plane_state->fb; 11942 struct drm_framebuffer *fb = plane_state->fb;
11943 int ret;
11774 11944
11775 if (crtc_state && INTEL_INFO(dev)->gen >= 9 && 11945 if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11776 plane->type != DRM_PLANE_TYPE_CURSOR) { 11946 plane->type != DRM_PLANE_TYPE_CURSOR) {
@@ -11809,11 +11979,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11809 turn_off = was_visible && (!visible || mode_changed); 11979 turn_off = was_visible && (!visible || mode_changed);
11810 turn_on = visible && (!was_visible || mode_changed); 11980 turn_on = visible && (!was_visible || mode_changed);
11811 11981
11812 DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, 11982 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11813 plane->base.id, fb ? fb->base.id : -1); 11983 intel_crtc->base.base.id,
11984 intel_crtc->base.name,
11985 plane->base.id, plane->name,
11986 fb ? fb->base.id : -1);
11814 11987
11815 DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", 11988 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11816 plane->base.id, was_visible, visible, 11989 plane->base.id, plane->name,
11990 was_visible, visible,
11817 turn_off, turn_on, mode_changed); 11991 turn_off, turn_on, mode_changed);
11818 11992
11819 if (turn_on) { 11993 if (turn_on) {
@@ -12104,7 +12278,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12104 struct intel_plane_state *state; 12278 struct intel_plane_state *state;
12105 struct drm_framebuffer *fb; 12279 struct drm_framebuffer *fb;
12106 12280
12107 DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, 12281 DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
12282 crtc->base.base.id, crtc->base.name,
12108 context, pipe_config, pipe_name(crtc->pipe)); 12283 context, pipe_config, pipe_name(crtc->pipe));
12109 12284
12110 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); 12285 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
@@ -12205,29 +12380,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12205 state = to_intel_plane_state(plane->state); 12380 state = to_intel_plane_state(plane->state);
12206 fb = state->base.fb; 12381 fb = state->base.fb;
12207 if (!fb) { 12382 if (!fb) {
12208 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d " 12383 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
12209 "disabled, scaler_id = %d\n", 12384 plane->base.id, plane->name, state->scaler_id);
12210 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12211 plane->base.id, intel_plane->pipe,
12212 (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12213 drm_plane_index(plane), state->scaler_id);
12214 continue; 12385 continue;
12215 } 12386 }
12216 12387
12217 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled", 12388 DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
12218 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", 12389 plane->base.id, plane->name);
12219 plane->base.id, intel_plane->pipe, 12390 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
12220 crtc->base.primary == plane ? 0 : intel_plane->plane + 1, 12391 fb->base.id, fb->width, fb->height,
12221 drm_plane_index(plane)); 12392 drm_get_format_name(fb->pixel_format));
12222 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x", 12393 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
12223 fb->base.id, fb->width, fb->height, fb->pixel_format); 12394 state->scaler_id,
12224 DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", 12395 state->src.x1 >> 16, state->src.y1 >> 16,
12225 state->scaler_id, 12396 drm_rect_width(&state->src) >> 16,
12226 state->src.x1 >> 16, state->src.y1 >> 16, 12397 drm_rect_height(&state->src) >> 16,
12227 drm_rect_width(&state->src) >> 16, 12398 state->dst.x1, state->dst.y1,
12228 drm_rect_height(&state->src) >> 16, 12399 drm_rect_width(&state->dst),
12229 state->dst.x1, state->dst.y1, 12400 drm_rect_height(&state->dst));
12230 drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12231 } 12401 }
12232} 12402}
12233 12403
@@ -12894,7 +13064,7 @@ verify_crtc_state(struct drm_crtc *crtc,
12894 pipe_config->base.crtc = crtc; 13064 pipe_config->base.crtc = crtc;
12895 pipe_config->base.state = old_state; 13065 pipe_config->base.state = old_state;
12896 13066
12897 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 13067 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12898 13068
12899 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); 13069 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12900 13070
@@ -13255,9 +13425,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13255 * adjusted_mode bits in the crtc directly. 13425 * adjusted_mode bits in the crtc directly.
13256 */ 13426 */
13257 if (dev_priv->display.modeset_calc_cdclk) { 13427 if (dev_priv->display.modeset_calc_cdclk) {
13428 if (!intel_state->cdclk_pll_vco)
13429 intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
13430 if (!intel_state->cdclk_pll_vco)
13431 intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
13432
13258 ret = dev_priv->display.modeset_calc_cdclk(state); 13433 ret = dev_priv->display.modeset_calc_cdclk(state);
13434 if (ret < 0)
13435 return ret;
13259 13436
13260 if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) 13437 if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
13438 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
13261 ret = intel_modeset_all_pipes(state); 13439 ret = intel_modeset_all_pipes(state);
13262 13440
13263 if (ret < 0) 13441 if (ret < 0)
@@ -13606,7 +13784,8 @@ static int intel_atomic_commit(struct drm_device *dev,
13606 drm_atomic_helper_update_legacy_modeset_state(state->dev, state); 13784 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13607 13785
13608 if (dev_priv->display.modeset_commit_cdclk && 13786 if (dev_priv->display.modeset_commit_cdclk &&
13609 intel_state->dev_cdclk != dev_priv->cdclk_freq) 13787 (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
13788 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
13610 dev_priv->display.modeset_commit_cdclk(state); 13789 dev_priv->display.modeset_commit_cdclk(state);
13611 13790
13612 intel_modeset_verify_disabled(dev); 13791 intel_modeset_verify_disabled(dev);
@@ -13702,8 +13881,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
13702 13881
13703 state = drm_atomic_state_alloc(dev); 13882 state = drm_atomic_state_alloc(dev);
13704 if (!state) { 13883 if (!state) {
13705 DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory", 13884 DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
13706 crtc->base.id); 13885 crtc->base.id, crtc->name);
13707 return; 13886 return;
13708 } 13887 }
13709 13888
@@ -13971,9 +14150,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13971 */ 14150 */
13972void intel_plane_destroy(struct drm_plane *plane) 14151void intel_plane_destroy(struct drm_plane *plane)
13973{ 14152{
13974 struct intel_plane *intel_plane = to_intel_plane(plane); 14153 if (!plane)
14154 return;
14155
13975 drm_plane_cleanup(plane); 14156 drm_plane_cleanup(plane);
13976 kfree(intel_plane); 14157 kfree(to_intel_plane(plane));
13977} 14158}
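
Note: making intel_plane_destroy() NULL-tolerant, in the style of kfree(), is what lets the intel_crtc_init() failure path further down call it unconditionally; that path previously used bare drm_plane_cleanup() and never kfree()d the planes, leaking the intel_plane allocations.
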
13978 14159
13979const struct drm_plane_funcs intel_plane_funcs = { 14160const struct drm_plane_funcs intel_plane_funcs = {
@@ -14045,10 +14226,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14045 primary->disable_plane = i9xx_disable_primary_plane; 14226 primary->disable_plane = i9xx_disable_primary_plane;
14046 } 14227 }
14047 14228
14048 ret = drm_universal_plane_init(dev, &primary->base, 0, 14229 if (INTEL_INFO(dev)->gen >= 9)
14049 &intel_plane_funcs, 14230 ret = drm_universal_plane_init(dev, &primary->base, 0,
14050 intel_primary_formats, num_formats, 14231 &intel_plane_funcs,
14051 DRM_PLANE_TYPE_PRIMARY, NULL); 14232 intel_primary_formats, num_formats,
14233 DRM_PLANE_TYPE_PRIMARY,
14234 "plane 1%c", pipe_name(pipe));
14235 else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
14236 ret = drm_universal_plane_init(dev, &primary->base, 0,
14237 &intel_plane_funcs,
14238 intel_primary_formats, num_formats,
14239 DRM_PLANE_TYPE_PRIMARY,
14240 "primary %c", pipe_name(pipe));
14241 else
14242 ret = drm_universal_plane_init(dev, &primary->base, 0,
14243 &intel_plane_funcs,
14244 intel_primary_formats, num_formats,
14245 DRM_PLANE_TYPE_PRIMARY,
14246 "plane %c", plane_name(primary->plane));
14052 if (ret) 14247 if (ret)
14053 goto fail; 14248 goto fail;
14054 14249
@@ -14206,7 +14401,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14206 &intel_plane_funcs, 14401 &intel_plane_funcs,
14207 intel_cursor_formats, 14402 intel_cursor_formats,
14208 ARRAY_SIZE(intel_cursor_formats), 14403 ARRAY_SIZE(intel_cursor_formats),
14209 DRM_PLANE_TYPE_CURSOR, NULL); 14404 DRM_PLANE_TYPE_CURSOR,
14405 "cursor %c", pipe_name(pipe));
14210 if (ret) 14406 if (ret)
14211 goto fail; 14407 goto fail;
14212 14408
@@ -14291,7 +14487,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
14291 goto fail; 14487 goto fail;
14292 14488
14293 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, 14489 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14294 cursor, &intel_crtc_funcs, NULL); 14490 cursor, &intel_crtc_funcs,
14491 "pipe %c", pipe_name(pipe));
14295 if (ret) 14492 if (ret)
14296 goto fail; 14493 goto fail;
14297 14494
@@ -14325,10 +14522,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
14325 return; 14522 return;
14326 14523
14327fail: 14524fail:
14328 if (primary) 14525 intel_plane_destroy(primary);
14329 drm_plane_cleanup(primary); 14526 intel_plane_destroy(cursor);
14330 if (cursor)
14331 drm_plane_cleanup(cursor);
14332 kfree(crtc_state); 14527 kfree(crtc_state);
14333 kfree(intel_crtc); 14528 kfree(intel_crtc);
14334} 14529}
@@ -14507,6 +14702,8 @@ static void intel_setup_outputs(struct drm_device *dev)
14507 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14702 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14508 intel_dp_init(dev, PCH_DP_D, PORT_D); 14703 intel_dp_init(dev, PCH_DP_D, PORT_D);
14509 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 14704 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14705 bool has_edp;
14706
14510 /* 14707 /*
14511 * The DP_DETECTED bit is the latched state of the DDC 14708 * The DP_DETECTED bit is the latched state of the DDC
14512 * SDA pin at boot. However since eDP doesn't require DDC 14709 * SDA pin at boot. However since eDP doesn't require DDC
@@ -14516,19 +14713,17 @@ static void intel_setup_outputs(struct drm_device *dev)
14516 * eDP ports. Consult the VBT as well as DP_DETECTED to 14713 * eDP ports. Consult the VBT as well as DP_DETECTED to
14517 * detect eDP ports. 14714 * detect eDP ports.
14518 */ 14715 */
14519 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && 14716 has_edp = intel_dp_is_edp(dev, PORT_B);
14520 !intel_dp_is_edp(dev, PORT_B)) 14717 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_edp)
14718 has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14719 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && !has_edp)
14521 intel_hdmi_init(dev, VLV_HDMIB, PORT_B); 14720 intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14522 if (I915_READ(VLV_DP_B) & DP_DETECTED ||
14523 intel_dp_is_edp(dev, PORT_B))
14524 intel_dp_init(dev, VLV_DP_B, PORT_B);
14525 14721
14526 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && 14722 has_edp = intel_dp_is_edp(dev, PORT_C);
14527 !intel_dp_is_edp(dev, PORT_C)) 14723 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_edp)
14724 has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14725 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && !has_edp)
14528 intel_hdmi_init(dev, VLV_HDMIC, PORT_C); 14726 intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14529 if (I915_READ(VLV_DP_C) & DP_DETECTED ||
14530 intel_dp_is_edp(dev, PORT_C))
14531 intel_dp_init(dev, VLV_DP_C, PORT_C);
14532 14727
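
Note: the reordering here matters. intel_dp_init() now runs before the HDMI check and reports success, so has_edp &= intel_dp_init(...) clears the flag when eDP registration fails, and the port can still fall back to HDMI; the old code settled the HDMI question from the VBT alone, before DP init had been attempted.
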
14533 if (IS_CHERRYVIEW(dev)) { 14728 if (IS_CHERRYVIEW(dev)) {
14534 /* eDP not supported on port D, so don't check VBT */ 14729 /* eDP not supported on port D, so don't check VBT */
@@ -15020,6 +15215,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15020 broxton_modeset_commit_cdclk; 15215 broxton_modeset_commit_cdclk;
15021 dev_priv->display.modeset_calc_cdclk = 15216 dev_priv->display.modeset_calc_cdclk =
15022 broxton_modeset_calc_cdclk; 15217 broxton_modeset_calc_cdclk;
15218 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
15219 dev_priv->display.modeset_commit_cdclk =
15220 skl_modeset_commit_cdclk;
15221 dev_priv->display.modeset_calc_cdclk =
15222 skl_modeset_calc_cdclk;
15023 } 15223 }
15024 15224
15025 switch (INTEL_INFO(dev_priv)->gen) { 15225 switch (INTEL_INFO(dev_priv)->gen) {
@@ -15418,6 +15618,9 @@ void intel_modeset_init(struct drm_device *dev)
15418 15618
15419 intel_shared_dpll_init(dev); 15619 intel_shared_dpll_init(dev);
15420 15620
15621 if (dev_priv->max_cdclk_freq == 0)
15622 intel_update_max_cdclk(dev);
15623
15421 /* Just disable it once at startup */ 15624 /* Just disable it once at startup */
15422 i915_disable_vga(dev); 15625 i915_disable_vga(dev);
15423 intel_setup_outputs(dev); 15626 intel_setup_outputs(dev);
@@ -15558,8 +15761,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15558 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { 15761 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15559 bool plane; 15762 bool plane;
15560 15763
15561 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 15764 DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
15562 crtc->base.base.id); 15765 crtc->base.base.id, crtc->base.name);
15563 15766
15564 /* Pipe has the wrong plane attached and the plane is active. 15767 /* Pipe has the wrong plane attached and the plane is active.
15565 * Temporarily change the plane mapping and disable everything 15768 * Temporarily change the plane mapping and disable everything
@@ -15727,26 +15930,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15727 if (crtc_state->base.active) { 15930 if (crtc_state->base.active) {
15728 dev_priv->active_crtcs |= 1 << crtc->pipe; 15931 dev_priv->active_crtcs |= 1 << crtc->pipe;
15729 15932
15730 if (IS_BROADWELL(dev_priv)) { 15933 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
15731 pixclk = ilk_pipe_pixel_rate(crtc_state); 15934 pixclk = ilk_pipe_pixel_rate(crtc_state);
15732 15935 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15733 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15734 if (crtc_state->ips_enabled)
15735 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15736 } else if (IS_VALLEYVIEW(dev_priv) ||
15737 IS_CHERRYVIEW(dev_priv) ||
15738 IS_BROXTON(dev_priv))
15739 pixclk = crtc_state->base.adjusted_mode.crtc_clock; 15936 pixclk = crtc_state->base.adjusted_mode.crtc_clock;
15740 else 15937 else
15741 WARN_ON(dev_priv->display.modeset_calc_cdclk); 15938 WARN_ON(dev_priv->display.modeset_calc_cdclk);
15939
15940 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15941 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
15942 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15742 } 15943 }
15743 15944
15744 dev_priv->min_pixclk[crtc->pipe] = pixclk; 15945 dev_priv->min_pixclk[crtc->pipe] = pixclk;
15745 15946
15746 readout_plane_state(crtc); 15947 readout_plane_state(crtc);
15747 15948
15748 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 15949 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15749 crtc->base.base.id, 15950 crtc->base.base.id, crtc->base.name,
15750 crtc->active ? "enabled" : "disabled"); 15951 crtc->active ? "enabled" : "disabled");
15751 } 15952 }
15752 15953
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2834ca5216b2..f97cd5305e4c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1578,6 +1578,27 @@ found:
1578 &pipe_config->dp_m2_n2); 1578 &pipe_config->dp_m2_n2);
1579 } 1579 }
1580 1580
1581 /*
1582 * DPLL0 VCO may need to be adjusted to get the correct
1583 * clock for eDP. This will affect cdclk as well.
1584 */
1585 if (is_edp(intel_dp) &&
1586 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
1587 int vco;
1588
1589 switch (pipe_config->port_clock / 2) {
1590 case 108000:
1591 case 216000:
1592 vco = 8640000;
1593 break;
1594 default:
1595 vco = 8100000;
1596 break;
1597 }
1598
1599 to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
1600 }
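
Note: the port_clock / 2 values of 108000 and 216000 correspond to the eDP 1.4 intermediate link rates of 2.16 and 4.32 Gbps, which divide evenly only out of the 8640 MHz VCO (8640/4 and 8640/2); all other rates divide out of 8100 MHz, so the default branch is right. Stashing the choice in the atomic state is what feeds skl_modeset_calc_cdclk() and the intel_modeset_checks() VCO handling earlier in this diff.
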
1601
1581 if (!HAS_DDI(dev)) 1602 if (!HAS_DDI(dev))
1582 intel_dp_set_clock(encoder, pipe_config); 1603 intel_dp_set_clock(encoder, pipe_config);
1583 1604
@@ -5349,8 +5370,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5349 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 5370 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5350 fixed_mode = drm_mode_duplicate(dev, 5371 fixed_mode = drm_mode_duplicate(dev,
5351 dev_priv->vbt.lfp_lvds_vbt_mode); 5372 dev_priv->vbt.lfp_lvds_vbt_mode);
5352 if (fixed_mode) 5373 if (fixed_mode) {
5353 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 5374 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5375 connector->display_info.width_mm = fixed_mode->width_mm;
5376 connector->display_info.height_mm = fixed_mode->height_mm;
5377 }
5354 } 5378 }
5355 mutex_unlock(&dev->mode_config.mutex); 5379 mutex_unlock(&dev->mode_config.mutex);
5356 5380
@@ -5547,9 +5571,9 @@ fail:
5547 return false; 5571 return false;
5548} 5572}
5549 5573
5550void 5574bool intel_dp_init(struct drm_device *dev,
5551intel_dp_init(struct drm_device *dev, 5575 i915_reg_t output_reg,
5552 i915_reg_t output_reg, enum port port) 5576 enum port port)
5553{ 5577{
5554 struct drm_i915_private *dev_priv = dev->dev_private; 5578 struct drm_i915_private *dev_priv = dev->dev_private;
5555 struct intel_digital_port *intel_dig_port; 5579 struct intel_digital_port *intel_dig_port;
@@ -5559,7 +5583,7 @@ intel_dp_init(struct drm_device *dev,
5559 5583
5560 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 5584 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5561 if (!intel_dig_port) 5585 if (!intel_dig_port)
5562 return; 5586 return false;
5563 5587
5564 intel_connector = intel_connector_alloc(); 5588 intel_connector = intel_connector_alloc();
5565 if (!intel_connector) 5589 if (!intel_connector)
@@ -5569,7 +5593,7 @@ intel_dp_init(struct drm_device *dev,
5569 encoder = &intel_encoder->base; 5593 encoder = &intel_encoder->base;
5570 5594
5571 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 5595 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5572 DRM_MODE_ENCODER_TMDS, NULL)) 5596 DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
5573 goto err_encoder_init; 5597 goto err_encoder_init;
5574 5598
5575 intel_encoder->compute_config = intel_dp_compute_config; 5599 intel_encoder->compute_config = intel_dp_compute_config;
@@ -5616,7 +5640,7 @@ intel_dp_init(struct drm_device *dev,
5616 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 5640 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5617 goto err_init_connector; 5641 goto err_init_connector;
5618 5642
5619 return; 5643 return true;
5620 5644
5621err_init_connector: 5645err_init_connector:
5622 drm_encoder_cleanup(encoder); 5646 drm_encoder_cleanup(encoder);
@@ -5624,8 +5648,7 @@ err_encoder_init:
5624 kfree(intel_connector); 5648 kfree(intel_connector);
5625err_connector_alloc: 5649err_connector_alloc:
5626 kfree(intel_dig_port); 5650 kfree(intel_dig_port);
5627 5651 return false;
5628 return;
5629} 5652}
5630 5653
5631void intel_dp_mst_suspend(struct drm_device *dev) 5654void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7a34090cef34..f62ca9a126b3 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -534,7 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
534 intel_mst->primary = intel_dig_port; 534 intel_mst->primary = intel_dig_port;
535 535
536 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, 536 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
537 DRM_MODE_ENCODER_DPMST, NULL); 537 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
538 538
539 intel_encoder->type = INTEL_OUTPUT_DP_MST; 539 intel_encoder->type = INTEL_OUTPUT_DP_MST;
540 intel_encoder->crtc_mask = 0x7; 540 intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index c283ba4babe8..c0eff1571731 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
208 if (memcmp(&crtc_state->dpll_hw_state, 208 if (memcmp(&crtc_state->dpll_hw_state,
209 &shared_dpll[i].hw_state, 209 &shared_dpll[i].hw_state,
210 sizeof(crtc_state->dpll_hw_state)) == 0) { 210 sizeof(crtc_state->dpll_hw_state)) == 0) {
211 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n", 211 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
212 crtc->base.base.id, pll->name, 212 crtc->base.base.id, crtc->base.name, pll->name,
213 shared_dpll[i].crtc_mask, 213 shared_dpll[i].crtc_mask,
214 pll->active_mask); 214 pll->active_mask);
215 return pll; 215 return pll;
@@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
220 for (i = range_min; i <= range_max; i++) { 220 for (i = range_min; i <= range_max; i++) {
221 pll = &dev_priv->shared_dplls[i]; 221 pll = &dev_priv->shared_dplls[i];
222 if (shared_dpll[i].crtc_mask == 0) { 222 if (shared_dpll[i].crtc_mask == 0) {
223 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 223 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
224 crtc->base.base.id, pll->name); 224 crtc->base.base.id, crtc->base.name, pll->name);
225 return pll; 225 return pll;
226 } 226 }
227 } 227 }
@@ -358,14 +358,17 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
358 i = (enum intel_dpll_id) crtc->pipe; 358 i = (enum intel_dpll_id) crtc->pipe;
359 pll = &dev_priv->shared_dplls[i]; 359 pll = &dev_priv->shared_dplls[i];
360 360
361 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 361 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
362 crtc->base.base.id, pll->name); 362 crtc->base.base.id, crtc->base.name, pll->name);
363 } else { 363 } else {
364 pll = intel_find_shared_dpll(crtc, crtc_state, 364 pll = intel_find_shared_dpll(crtc, crtc_state,
365 DPLL_ID_PCH_PLL_A, 365 DPLL_ID_PCH_PLL_A,
366 DPLL_ID_PCH_PLL_B); 366 DPLL_ID_PCH_PLL_B);
367 } 367 }
368 368
369 if (!pll)
370 return NULL;
371
369 /* reference the pll */ 372 /* reference the pll */
370 intel_reference_shared_dpll(pll, crtc_state); 373 intel_reference_shared_dpll(pll, crtc_state);
371 374
@@ -1236,9 +1239,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1236 case 162000: 1239 case 162000:
1237 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); 1240 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1238 break; 1241 break;
1239 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1240 results in CDCLK change. Need to handle the change of CDCLK by
1241 disabling pipes and re-enabling them */
1242 case 108000: 1242 case 108000:
1243 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); 1243 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1244 break; 1244 break;
@@ -1613,8 +1613,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1613 i = (enum intel_dpll_id) intel_dig_port->port; 1613 i = (enum intel_dpll_id) intel_dig_port->port;
1614 pll = intel_get_shared_dpll_by_id(dev_priv, i); 1614 pll = intel_get_shared_dpll_by_id(dev_priv, i);
1615 1615
1616 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 1616 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1617 crtc->base.base.id, pll->name); 1617 crtc->base.base.id, crtc->base.name, pll->name);
1618 1618
1619 intel_reference_shared_dpll(pll, crtc_state); 1619 intel_reference_shared_dpll(pll, crtc_state);
1620 1620
@@ -1633,18 +1633,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1633static void intel_ddi_pll_init(struct drm_device *dev) 1633static void intel_ddi_pll_init(struct drm_device *dev)
1634{ 1634{
1635 struct drm_i915_private *dev_priv = dev->dev_private; 1635 struct drm_i915_private *dev_priv = dev->dev_private;
1636 uint32_t val = I915_READ(LCPLL_CTL); 1636
1637 1637 if (INTEL_GEN(dev_priv) < 9) {
1638 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 1638 uint32_t val = I915_READ(LCPLL_CTL);
1639 int cdclk_freq; 1639
1640
1641 cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
1642 dev_priv->skl_boot_cdclk = cdclk_freq;
1643 if (skl_sanitize_cdclk(dev_priv))
1644 DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
1645 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
1646 DRM_ERROR("LCPLL1 is disabled\n");
1647 } else if (!IS_BROXTON(dev_priv)) {
1648 /* 1640 /*
1649 * The LCPLL register should be turned on by the BIOS. For now 1641 * The LCPLL register should be turned on by the BIOS. For now
1650 * let's just check its state and print errors in case 1642 * let's just check its state and print errors in case
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 97de5e05890a..ebe7b3427e2e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -304,6 +304,9 @@ struct intel_atomic_state {
304 unsigned int active_crtcs; 304 unsigned int active_crtcs;
305 unsigned int min_pixclk[I915_MAX_PIPES]; 305 unsigned int min_pixclk[I915_MAX_PIPES];
306 306
307 /* SKL/KBL Only */
308 unsigned int cdclk_pll_vco;
309
307 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; 310 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
308 311
309 /* 312 /*
@@ -1133,6 +1136,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
1133void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); 1136void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
1134 1137
1135/* intel_display.c */ 1138/* intel_display.c */
1139void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
1136void intel_update_rawclk(struct drm_i915_private *dev_priv); 1140void intel_update_rawclk(struct drm_i915_private *dev_priv);
1137int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 1141int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
1138 const char *name, u32 reg, int ref_freq); 1142 const char *name, u32 reg, int ref_freq);
@@ -1259,7 +1263,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
1259void hsw_disable_pc8(struct drm_i915_private *dev_priv); 1263void hsw_disable_pc8(struct drm_i915_private *dev_priv);
1260void broxton_init_cdclk(struct drm_i915_private *dev_priv); 1264void broxton_init_cdclk(struct drm_i915_private *dev_priv);
1261void broxton_uninit_cdclk(struct drm_i915_private *dev_priv); 1265void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
1262bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
1263void broxton_ddi_phy_init(struct drm_i915_private *dev_priv); 1266void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
1264void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv); 1267void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
1265void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv); 1268void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
@@ -1268,8 +1271,8 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
1268void bxt_disable_dc9(struct drm_i915_private *dev_priv); 1271void bxt_disable_dc9(struct drm_i915_private *dev_priv);
1269void gen9_enable_dc5(struct drm_i915_private *dev_priv); 1272void gen9_enable_dc5(struct drm_i915_private *dev_priv);
1270void skl_init_cdclk(struct drm_i915_private *dev_priv); 1273void skl_init_cdclk(struct drm_i915_private *dev_priv);
1271int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
1272void skl_uninit_cdclk(struct drm_i915_private *dev_priv); 1274void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
1275unsigned int skl_cdclk_get_vco(unsigned int freq);
1273void skl_enable_dc6(struct drm_i915_private *dev_priv); 1276void skl_enable_dc6(struct drm_i915_private *dev_priv);
1274void skl_disable_dc6(struct drm_i915_private *dev_priv); 1277void skl_disable_dc6(struct drm_i915_private *dev_priv);
1275void intel_dp_get_m_n(struct intel_crtc *crtc, 1278void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -1309,7 +1312,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
1309void intel_csr_ucode_resume(struct drm_i915_private *); 1312void intel_csr_ucode_resume(struct drm_i915_private *);
1310 1313
1311/* intel_dp.c */ 1314/* intel_dp.c */
1312void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); 1315bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
1313bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 1316bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
1314 struct intel_connector *intel_connector); 1317 struct intel_connector *intel_connector);
1315void intel_dp_set_link_params(struct intel_dp *intel_dp, 1318void intel_dp_set_link_params(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 4009618a5b34..c70132aa91d5 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -1450,7 +1450,7 @@ void intel_dsi_init(struct drm_device *dev)
1450 connector = &intel_connector->base; 1450 connector = &intel_connector->base;
1451 1451
1452 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, 1452 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
1453 NULL); 1453 "DSI %c", port_name(port));
1454 1454
1455 intel_encoder->compute_config = intel_dsi_compute_config; 1455 intel_encoder->compute_config = intel_dsi_compute_config;
1456 intel_encoder->pre_enable = intel_dsi_pre_enable; 1456 intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1578,6 +1578,9 @@ void intel_dsi_init(struct drm_device *dev)
1578 goto err; 1578 goto err;
1579 } 1579 }
1580 1580
1581 connector->display_info.width_mm = fixed_mode->width_mm;
1582 connector->display_info.height_mm = fixed_mode->height_mm;
1583
1581 intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 1584 intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
1582 1585
1583 intel_dsi_add_properties(intel_connector); 1586 intel_dsi_add_properties(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 286baec979c8..a456f2eb68b6 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -406,6 +406,18 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
406 return mode; 406 return mode;
407} 407}
408 408
409static char intel_dvo_port_name(i915_reg_t dvo_reg)
410{
411 if (i915_mmio_reg_equal(dvo_reg, DVOA))
412 return 'A';
413 else if (i915_mmio_reg_equal(dvo_reg, DVOB))
414 return 'B';
415 else if (i915_mmio_reg_equal(dvo_reg, DVOC))
416 return 'C';
417 else
418 return '?';
419}
420
409void intel_dvo_init(struct drm_device *dev) 421void intel_dvo_init(struct drm_device *dev)
410{ 422{
411 struct drm_i915_private *dev_priv = dev->dev_private; 423 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -428,8 +440,6 @@ void intel_dvo_init(struct drm_device *dev)
428 intel_dvo->attached_connector = intel_connector; 440 intel_dvo->attached_connector = intel_connector;
429 441
430 intel_encoder = &intel_dvo->base; 442 intel_encoder = &intel_dvo->base;
431 drm_encoder_init(dev, &intel_encoder->base,
432 &intel_dvo_enc_funcs, encoder_type, NULL);
433 443
434 intel_encoder->disable = intel_disable_dvo; 444 intel_encoder->disable = intel_disable_dvo;
435 intel_encoder->enable = intel_enable_dvo; 445 intel_encoder->enable = intel_enable_dvo;
@@ -496,6 +506,10 @@ void intel_dvo_init(struct drm_device *dev)
496 if (!dvoinit) 506 if (!dvoinit)
497 continue; 507 continue;
498 508
509 drm_encoder_init(dev, &intel_encoder->base,
510 &intel_dvo_enc_funcs, encoder_type,
511 "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
512
499 intel_encoder->type = INTEL_OUTPUT_DVO; 513 intel_encoder->type = INTEL_OUTPUT_DVO;
500 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 514 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
501 switch (dvo->type) { 515 switch (dvo->type) {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 99e27530e264..ef8e67690f3d 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -490,10 +490,10 @@ retry:
490 } 490 }
491 crtcs[i] = new_crtc; 491 crtcs[i] = new_crtc;
492 492
493 DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", 493 DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
494 connector->name, 494 connector->name,
495 pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
496 connector->state->crtc->base.id, 495 connector->state->crtc->base.id,
496 connector->state->crtc->name,
497 modes[i]->hdisplay, modes[i]->vdisplay, 497 modes[i]->hdisplay, modes[i]->vdisplay,
498 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); 498 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
499 499
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 9d79c4c3e256..41601c71f529 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -48,14 +48,23 @@ struct drm_i915_gem_request;
48 * queue (a circular array of work items), again described in the process 48 * queue (a circular array of work items), again described in the process
49 * descriptor. Work queue pages are mapped momentarily as required. 49 * descriptor. Work queue pages are mapped momentarily as required.
50 * 50 *
51 * Finally, we also keep a few statistics here, including the number of 51 * We also keep a few statistics on failures. Ideally, these should all
52 * submissions to each engine, and a record of the last submission failure 52 * be zero!
53 * (if any). 53 * no_wq_space: times that the submission pre-check found no space was
54 * available in the work queue (note, the queue is shared,
55 * not per-engine). It is OK for this to be nonzero, but
56 * it should not be huge!
57 * q_fail: failed to enqueue a work item. This should never happen,
58 * because we check for space beforehand.
59 * b_fail: failed to ring the doorbell. This should never happen, unless
60 * somehow the hardware misbehaves, or maybe if the GuC firmware
61 * crashes? We probably need to reset the GPU to recover.
62 * retcode: errno from last guc_submit()
54 */ 63 */
55struct i915_guc_client { 64struct i915_guc_client {
56 struct drm_i915_gem_object *client_obj; 65 struct drm_i915_gem_object *client_obj;
57 void *client_base; /* first page (only) of above */ 66 void *client_base; /* first page (only) of above */
58 struct intel_context *owner; 67 struct i915_gem_context *owner;
59 struct intel_guc *guc; 68 struct intel_guc *guc;
60 uint32_t priority; 69 uint32_t priority;
61 uint32_t ctx_index; 70 uint32_t ctx_index;
@@ -71,12 +80,13 @@ struct i915_guc_client {
71 uint32_t wq_tail; 80 uint32_t wq_tail;
72 uint32_t unused; /* Was 'wq_head' */ 81 uint32_t unused; /* Was 'wq_head' */
73 82
74 /* GuC submission statistics & status */ 83 uint32_t no_wq_space;
75 uint64_t submissions[GUC_MAX_ENGINES_NUM]; 84 uint32_t q_fail; /* No longer used */
76 uint32_t q_fail;
77 uint32_t b_fail; 85 uint32_t b_fail;
78 int retcode; 86 int retcode;
79 int spare; /* pad to 32 DWords */ 87
88 /* Per-engine counts of GuC submissions */
89 uint64_t submissions[GUC_MAX_ENGINES_NUM];
80}; 90};
81 91
82enum intel_guc_fw_status { 92enum intel_guc_fw_status {
@@ -138,9 +148,9 @@ struct intel_guc {
138}; 148};
139 149
140/* intel_guc_loader.c */ 150/* intel_guc_loader.c */
141extern void intel_guc_ucode_init(struct drm_device *dev); 151extern void intel_guc_init(struct drm_device *dev);
142extern int intel_guc_ucode_load(struct drm_device *dev); 152extern int intel_guc_setup(struct drm_device *dev);
143extern void intel_guc_ucode_fini(struct drm_device *dev); 153extern void intel_guc_fini(struct drm_device *dev);
144extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); 154extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
145extern int intel_guc_suspend(struct drm_device *dev); 155extern int intel_guc_suspend(struct drm_device *dev);
146extern int intel_guc_resume(struct drm_device *dev); 156extern int intel_guc_resume(struct drm_device *dev);
@@ -148,10 +158,9 @@ extern int intel_guc_resume(struct drm_device *dev);
148/* i915_guc_submission.c */ 158/* i915_guc_submission.c */
149int i915_guc_submission_init(struct drm_device *dev); 159int i915_guc_submission_init(struct drm_device *dev);
150int i915_guc_submission_enable(struct drm_device *dev); 160int i915_guc_submission_enable(struct drm_device *dev);
151int i915_guc_submit(struct i915_guc_client *client, 161int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
152 struct drm_i915_gem_request *rq); 162int i915_guc_submit(struct drm_i915_gem_request *rq);
153void i915_guc_submission_disable(struct drm_device *dev); 163void i915_guc_submission_disable(struct drm_device *dev);
154void i915_guc_submission_fini(struct drm_device *dev); 164void i915_guc_submission_fini(struct drm_device *dev);
155int i915_guc_wq_check_space(struct i915_guc_client *client);
156 165
157#endif 166#endif
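The rewritten comment pins down when each counter may legitimately be nonzero. A compact model of the check-then-submit flow those counters instrument; ring size, error codes, and engine count are simplified for illustration:

#include <stdint.h>
#include <stdio.h>

struct client {
	uint32_t wq_size, wq_used;
	uint32_t no_wq_space;		/* pre-check found the queue full */
	uint32_t b_fail;		/* doorbell ring failed */
	uint64_t submissions[4];	/* per-engine counts */
};

static int wq_check_space(struct client *c, uint32_t need)
{
	if (c->wq_size - c->wq_used < need) {
		c->no_wq_space++;	/* OK if nonzero, bad if huge */
		return -11;		/* -EAGAIN */
	}
	return 0;
}

static int submit(struct client *c, int engine, uint32_t need)
{
	if (wq_check_space(c, need))
		return -11;
	c->wq_used += need;		/* enqueue itself can no longer fail */
	c->submissions[engine]++;
	return 0;
}

int main(void)
{
	struct client c = { .wq_size = 64 };

	for (int i = 0; i < 20; i++)
		submit(&c, i % 4, 8);
	printf("no_wq_space=%u engine0=%llu\n", c.no_wq_space,
	       (unsigned long long)c.submissions[0]);
	return 0;
}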
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 2de57ffe5e18..944786d7075b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -71,7 +71,8 @@
71#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT) 71#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
72 72
73#define WQ_RING_TAIL_SHIFT 20 73#define WQ_RING_TAIL_SHIFT 20
74#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT) 74#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
75#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
75 76
76#define GUC_DOORBELL_ENABLED 1 77#define GUC_DOORBELL_ENABLED 1
77#define GUC_DOORBELL_DISABLED 0 78#define GUC_DOORBELL_DISABLED 0
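WQ_RING_TAIL_MAX makes the field width explicit: the tail is an 11-bit qword index, so 0x7FF is both the largest legal value and the source of the mask. A small self-contained check of the pack/unpack arithmetic, using a made-up tail value:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_RING_TAIL_SHIFT 20
#define WQ_RING_TAIL_MAX   0x7FF	/* 2^11 qword slots */
#define WQ_RING_TAIL_MASK  (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)

int main(void)
{
	uint32_t tail = 0x123;		/* qword index into the ring */
	assert(tail <= WQ_RING_TAIL_MAX);

	uint32_t word = (tail << WQ_RING_TAIL_SHIFT) & WQ_RING_TAIL_MASK;
	uint32_t back = (word & WQ_RING_TAIL_MASK) >> WQ_RING_TAIL_SHIFT;

	printf("word=0x%08x tail=0x%03x\n", word, back);
	return back == tail ? 0 : 1;
}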
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 34405de72dfa..f2b88c7209cb 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -103,6 +103,7 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
103{ 103{
104 struct intel_engine_cs *engine; 104 struct intel_engine_cs *engine;
105 int irqs; 105 int irqs;
106 u32 tmp;
106 107
107 /* tell all command streamers to forward interrupts and vblank to GuC */ 108 /* tell all command streamers to forward interrupts and vblank to GuC */
108 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); 109 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
@@ -117,6 +118,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
117 I915_WRITE(GUC_BCS_RCS_IER, ~irqs); 118 I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
118 I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); 119 I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
119 I915_WRITE(GUC_WD_VECS_IER, ~irqs); 120 I915_WRITE(GUC_WD_VECS_IER, ~irqs);
121
122 /*
123 * If GuC has routed PM interrupts to itself, don't keep the redirect
124 * bit itself, but do keep any other interrupts that GuC left unmasked.
125 */
126 tmp = I915_READ(GEN6_PMINTRMSK);
127 if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
128 dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
129 dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
130 }
120} 131}
121 132
122static u32 get_gttype(struct drm_i915_private *dev_priv) 133static u32 get_gttype(struct drm_i915_private *dev_priv)
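The new GEN6_PMINTRMSK handling reads as pure bit algebra: set bits in the register mean "masked", so ~tmp is the set GuC left unmasked, and those bits (minus the redirect bit itself) must never be masked again by the host. A standalone check of that identity; the register value and the redirect bit position are invented for the demo:

#include <stdint.h>
#include <stdio.h>

#define REDIRECT (1u << 31)	/* stands in for GEN8_PMINTR_REDIRECT_TO_NON_DISP */

int main(void)
{
	uint32_t tmp = REDIRECT | 0xffff0000u;	/* imaginary mask register */
	uint32_t keep = 0;

	if (tmp & REDIRECT) {
		keep |= ~(tmp & ~REDIRECT);	/* add everything GuC unmasked */
		keep &= ~REDIRECT;		/* but never the redirect bit */
	}

	/* Equivalent single expression: unmasked bits, redirect excluded */
	uint32_t expect = ~(tmp | REDIRECT);

	printf("keep=0x%08x expect=0x%08x\n", keep, expect);
	return keep == expect ? 0 : 1;
}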
@@ -386,65 +397,58 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
386} 397}
387 398
388/** 399/**
389 * intel_guc_ucode_load() - load GuC uCode into the device 400 * intel_guc_setup() - finish preparing the GuC for activity
390 * @dev: drm device 401 * @dev: drm device
391 * 402 *
392 * Called from gem_init_hw() during driver loading and also after a GPU reset. 403 * Called from gem_init_hw() during driver loading and also after a GPU reset.
393 * 404 *
 405 * The main action required here is to load the GuC uCode into the device.
394 * The firmware image should have already been fetched into memory by the 406 * The firmware image should have already been fetched into memory by the
395 * earlier call to intel_guc_ucode_init(), so here we need only check that 407 * earlier call to intel_guc_init(), so here we need only check that worked,
396 * is succeeded, and then transfer the image to the h/w. 408 * and then transfer the image to the h/w.
397 * 409 *
398 * Return: non-zero code on error 410 * Return: non-zero code on error
399 */ 411 */
400int intel_guc_ucode_load(struct drm_device *dev) 412int intel_guc_setup(struct drm_device *dev)
401{ 413{
402 struct drm_i915_private *dev_priv = dev->dev_private; 414 struct drm_i915_private *dev_priv = dev->dev_private;
403 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 415 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
404 int retries, err = 0; 416 const char *fw_path = guc_fw->guc_fw_path;
417 int retries, ret, err;
405 418
406 if (!i915.enable_guc_submission) 419 DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
407 return 0; 420 fw_path,
408
409 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
410 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), 421 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
411 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 422 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
412 423
413 direct_interrupts_to_host(dev_priv); 424 /* Loading forbidden, or no firmware to load? */
414 425 if (!i915.enable_guc_loading) {
415 if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) 426 err = 0;
416 return 0; 427 goto fail;
417 428 } else if (fw_path == NULL || *fw_path == '\0') {
 418 if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS && 429 if (fw_path && *fw_path == '\0')
419 guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) 430 DRM_INFO("No GuC firmware known for this platform\n");
420 return -ENOEXEC; 431 err = -ENODEV;
421 432 goto fail;
422 guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; 433 }
423
424 DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
425 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
426 434
427 switch (guc_fw->guc_fw_fetch_status) { 435 /* Fetch failed, or already fetched but failed to load? */
428 case GUC_FIRMWARE_FAIL: 436 if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
429 /* something went wrong :( */
430 err = -EIO; 437 err = -EIO;
431 goto fail; 438 goto fail;
432 439 } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
433 case GUC_FIRMWARE_NONE: 440 err = -ENOEXEC;
434 case GUC_FIRMWARE_PENDING:
435 default:
436 /* "can't happen" */
437 WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
438 guc_fw->guc_fw_path,
439 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
440 guc_fw->guc_fw_fetch_status);
441 err = -ENXIO;
442 goto fail; 441 goto fail;
443
444 case GUC_FIRMWARE_SUCCESS:
445 break;
446 } 442 }
447 443
444 direct_interrupts_to_host(dev_priv);
445
446 guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
447
448 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
449 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
450 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
451
448 err = i915_guc_submission_init(dev); 452 err = i915_guc_submission_init(dev);
449 if (err) 453 if (err)
450 goto fail; 454 goto fail;
@@ -462,7 +466,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
462 */ 466 */
463 err = i915_reset_guc(dev_priv); 467 err = i915_reset_guc(dev_priv);
464 if (err) { 468 if (err) {
465 DRM_ERROR("GuC reset failed, err %d\n", err); 469 DRM_ERROR("GuC reset failed: %d\n", err);
466 goto fail; 470 goto fail;
467 } 471 }
468 472
@@ -473,8 +477,8 @@ int intel_guc_ucode_load(struct drm_device *dev)
473 if (--retries == 0) 477 if (--retries == 0)
474 goto fail; 478 goto fail;
475 479
476 DRM_INFO("GuC fw load failed, err %d; will reset and " 480 DRM_INFO("GuC fw load failed: %d; will reset and "
477 "retry %d more time(s)\n", err, retries); 481 "retry %d more time(s)\n", err, retries);
478 } 482 }
479 483
480 guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; 484 guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -496,7 +500,6 @@ int intel_guc_ucode_load(struct drm_device *dev)
496 return 0; 500 return 0;
497 501
498fail: 502fail:
499 DRM_ERROR("GuC firmware load failed, err %d\n", err);
500 if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) 503 if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
501 guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; 504 guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
502 505
@@ -504,7 +507,41 @@ fail:
504 i915_guc_submission_disable(dev); 507 i915_guc_submission_disable(dev);
505 i915_guc_submission_fini(dev); 508 i915_guc_submission_fini(dev);
506 509
507 return err; 510 /*
511 * We've failed to load the firmware :(
512 *
513 * Decide whether to disable GuC submission and fall back to
514 * execlist mode, and whether to hide the error by returning
515 * zero or to return -EIO, which the caller will treat as a
516 * nonfatal error (i.e. it doesn't prevent driver load, but
517 * marks the GPU as wedged until reset).
518 */
519 if (i915.enable_guc_loading > 1) {
520 ret = -EIO;
521 } else if (i915.enable_guc_submission > 1) {
522 ret = -EIO;
523 } else {
524 ret = 0;
525 }
526
527 if (err == 0)
528 DRM_INFO("GuC firmware load skipped\n");
529 else if (ret == -EIO)
530 DRM_ERROR("GuC firmware load failed: %d\n", err);
531 else
532 DRM_INFO("GuC firmware load failed: %d\n", err);
533
534 if (i915.enable_guc_submission) {
535 if (fw_path == NULL)
536 DRM_INFO("GuC submission without firmware not supported\n");
537 if (ret == 0)
538 DRM_INFO("Falling back to execlist mode\n");
539 else
540 DRM_ERROR("GuC init failed: %d\n", ret);
541 }
542 i915.enable_guc_submission = 0;
543
544 return ret;
508} 545}
509 546
510static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) 547static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
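The new fail path separates two decisions: whether to keep going (fall back to execlists) and how loudly to complain. A value greater than 1 in either module parameter means "mandatory", which upgrades the failure to -EIO. A minimal decision-table sketch, assuming the same 0/1/2 convention the patch uses:

#include <stdio.h>

/* Returns 0 (fall back to execlist mode) or -5 (-EIO: wedge the GPU). */
static int fail_severity(int enable_loading, int enable_submission)
{
	if (enable_loading > 1 || enable_submission > 1)
		return -5;	/* feature was required, not just preferred */
	return 0;		/* optional: carry on without the GuC */
}

int main(void)
{
	printf("preferred: %d\n", fail_severity(1, 1));	/* 0  */
	printf("required:  %d\n", fail_severity(2, 1));	/* -5 */
	return 0;
}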
@@ -629,22 +666,25 @@ fail:
629} 666}
630 667
631/** 668/**
632 * intel_guc_ucode_init() - define parameters and fetch firmware 669 * intel_guc_init() - define parameters and fetch firmware
633 * @dev: drm device 670 * @dev: drm device
634 * 671 *
635 * Called early during driver load, but after GEM is initialised. 672 * Called early during driver load, but after GEM is initialised.
636 * 673 *
637 * The firmware will be transferred to the GuC's memory later, 674 * The firmware will be transferred to the GuC's memory later,
638 * when intel_guc_ucode_load() is called. 675 * when intel_guc_setup() is called.
639 */ 676 */
640void intel_guc_ucode_init(struct drm_device *dev) 677void intel_guc_init(struct drm_device *dev)
641{ 678{
642 struct drm_i915_private *dev_priv = dev->dev_private; 679 struct drm_i915_private *dev_priv = dev->dev_private;
643 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 680 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
644 const char *fw_path; 681 const char *fw_path;
645 682
646 if (!HAS_GUC_SCHED(dev)) 683 /* A negative value means "use platform default" */
647 i915.enable_guc_submission = false; 684 if (i915.enable_guc_loading < 0)
685 i915.enable_guc_loading = HAS_GUC_UCODE(dev);
686 if (i915.enable_guc_submission < 0)
687 i915.enable_guc_submission = HAS_GUC_SCHED(dev);
648 688
649 if (!HAS_GUC_UCODE(dev)) { 689 if (!HAS_GUC_UCODE(dev)) {
650 fw_path = NULL; 690 fw_path = NULL;
@@ -657,26 +697,21 @@ void intel_guc_ucode_init(struct drm_device *dev)
657 guc_fw->guc_fw_major_wanted = 8; 697 guc_fw->guc_fw_major_wanted = 8;
658 guc_fw->guc_fw_minor_wanted = 7; 698 guc_fw->guc_fw_minor_wanted = 7;
659 } else { 699 } else {
660 i915.enable_guc_submission = false;
661 fw_path = ""; /* unknown device */ 700 fw_path = ""; /* unknown device */
662 } 701 }
663 702
664 if (!i915.enable_guc_submission)
665 return;
666
667 guc_fw->guc_dev = dev; 703 guc_fw->guc_dev = dev;
668 guc_fw->guc_fw_path = fw_path; 704 guc_fw->guc_fw_path = fw_path;
669 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; 705 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
670 guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; 706 guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
671 707
708 /* Early (and silent) return if GuC loading is disabled */
709 if (!i915.enable_guc_loading)
710 return;
672 if (fw_path == NULL) 711 if (fw_path == NULL)
673 return; 712 return;
674 713 if (*fw_path == '\0')
675 if (*fw_path == '\0') {
676 DRM_ERROR("No GuC firmware known for this platform\n");
677 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
678 return; 714 return;
679 }
680 715
681 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; 716 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
682 DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); 717 DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -685,10 +720,10 @@ void intel_guc_ucode_init(struct drm_device *dev)
685} 720}
686 721
687/** 722/**
688 * intel_guc_ucode_fini() - clean up all allocated resources 723 * intel_guc_fini() - clean up all allocated resources
689 * @dev: drm device 724 * @dev: drm device
690 */ 725 */
691void intel_guc_ucode_fini(struct drm_device *dev) 726void intel_guc_fini(struct drm_device *dev)
692{ 727{
693 struct drm_i915_private *dev_priv = dev->dev_private; 728 struct drm_i915_private *dev_priv = dev->dev_private;
694 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 729 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
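intel_guc_init() above resolves the "negative means platform default" convention exactly once, early, so every later test of the two parameters sees a plain 0/1/2 value. A sketch of that resolution step, with HAS_GUC_UCODE/HAS_GUC_SCHED modelled as simple booleans:

#include <stdio.h>
#include <stdbool.h>

static int resolve(int param, bool platform_has_it)
{
	return param < 0 ? platform_has_it : param;	/* negative: use default */
}

int main(void)
{
	bool has_guc_ucode = true, has_guc_sched = true;

	int loading = resolve(-1, has_guc_ucode);	/* auto resolves to 1 here */
	int submission = resolve(0, has_guc_sched);	/* explicit off stays off */

	printf("loading=%d submission=%d\n", loading, submission);
	return 0;
}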
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6b52c6accf6a..eb455ea6ea92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1945,7 +1945,7 @@ void intel_hdmi_init(struct drm_device *dev,
1945 intel_encoder = &intel_dig_port->base; 1945 intel_encoder = &intel_dig_port->base;
1946 1946
1947 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 1947 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
1948 DRM_MODE_ENCODER_TMDS, NULL); 1948 DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
1949 1949
1950 intel_encoder->compute_config = intel_hdmi_compute_config; 1950 intel_encoder->compute_config = intel_hdmi_compute_config;
1951 if (HAS_PCH_SPLIT(dev)) { 1951 if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index db10c961e0f4..5c191a1afaaf 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -231,9 +231,9 @@ enum {
231/* Typical size of the average request (2 pipecontrols and a MI_BB) */ 231/* Typical size of the average request (2 pipecontrols and a MI_BB) */
232#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ 232#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
233 233
234static int execlists_context_deferred_alloc(struct intel_context *ctx, 234static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
235 struct intel_engine_cs *engine); 235 struct intel_engine_cs *engine);
236static int intel_lr_context_pin(struct intel_context *ctx, 236static int intel_lr_context_pin(struct i915_gem_context *ctx,
237 struct intel_engine_cs *engine); 237 struct intel_engine_cs *engine);
238 238
239/** 239/**
@@ -302,7 +302,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
302 * descriptor for a pinned context 302 * descriptor for a pinned context
303 * 303 *
304 * @ctx: Context to work on 304 * @ctx: Context to work on
305 * @ring: Engine the descriptor will be used with 305 * @engine: Engine the descriptor will be used with
306 * 306 *
307 * The context descriptor encodes various attributes of a context, 307 * The context descriptor encodes various attributes of a context,
308 * including its GTT address and some flags. Because it's fairly 308 * including its GTT address and some flags. Because it's fairly
@@ -317,22 +317,23 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
317 * bits 55-63: group ID, currently unused and set to 0 317 * bits 55-63: group ID, currently unused and set to 0
318 */ 318 */
319static void 319static void
320intel_lr_context_descriptor_update(struct intel_context *ctx, 320intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
321 struct intel_engine_cs *engine) 321 struct intel_engine_cs *engine)
322{ 322{
323 struct intel_context *ce = &ctx->engine[engine->id];
323 u64 desc; 324 u64 desc;
324 325
325 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); 326 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
326 327
327 desc = engine->ctx_desc_template; /* bits 0-11 */ 328 desc = engine->ctx_desc_template; /* bits 0-11 */
328 desc |= ctx->engine[engine->id].lrc_vma->node.start + /* bits 12-31 */ 329 desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
329 LRC_PPHWSP_PN * PAGE_SIZE; 330 /* bits 12-31 */
330 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ 331 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
331 332
332 ctx->engine[engine->id].lrc_desc = desc; 333 ce->lrc_desc = desc;
333} 334}
334 335
335uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 336uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
336 struct intel_engine_cs *engine) 337 struct intel_engine_cs *engine)
337{ 338{
338 return ctx->engine[engine->id].lrc_desc; 339 return ctx->engine[engine->id].lrc_desc;
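With the intel_context shorthand in place, the descriptor update reads as a straight bit-packing exercise over the layout documented above: engine template in bits 0-11, the page-aligned LRCA (offset past the per-process HWSP page) in bits 12-31, and the context hw_id in bits 32-52. A self-contained rehearsal of that packing; the template, address, and ID values are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define LRC_PPHWSP_PN	1		/* per-process HWSP page precedes state */
#define CTX_ID_SHIFT	32

int main(void)
{
	uint64_t template = 0x321;		/* bits 0-11, from the engine */
	uint64_t node_start = 0x00200000;	/* page-aligned GGTT offset */
	uint64_t hw_id = 7;			/* bits 32-52 */

	uint64_t desc = template;
	desc |= node_start + LRC_PPHWSP_PN * PAGE_SIZE;	/* bits 12-31 */
	desc |= hw_id << CTX_ID_SHIFT;

	printf("desc = 0x%016llx\n", (unsigned long long)desc);
	return 0;
}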
@@ -676,6 +677,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
676int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) 677int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
677{ 678{
678 struct intel_engine_cs *engine = request->engine; 679 struct intel_engine_cs *engine = request->engine;
680 struct intel_context *ce = &request->ctx->engine[engine->id];
679 int ret; 681 int ret;
680 682
681 /* Flush enough space to reduce the likelihood of waiting after 683 /* Flush enough space to reduce the likelihood of waiting after
@@ -684,13 +686,13 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
684 */ 686 */
685 request->reserved_space += EXECLISTS_REQUEST_SIZE; 687 request->reserved_space += EXECLISTS_REQUEST_SIZE;
686 688
687 if (request->ctx->engine[engine->id].state == NULL) { 689 if (!ce->state) {
688 ret = execlists_context_deferred_alloc(request->ctx, engine); 690 ret = execlists_context_deferred_alloc(request->ctx, engine);
689 if (ret) 691 if (ret)
690 return ret; 692 return ret;
691 } 693 }
692 694
693 request->ringbuf = request->ctx->engine[engine->id].ringbuf; 695 request->ringbuf = ce->ringbuf;
694 696
695 if (i915.enable_guc_submission) { 697 if (i915.enable_guc_submission) {
696 /* 698 /*
@@ -698,9 +700,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
698 * going any further, as the i915_add_request() call 700 * going any further, as the i915_add_request() call
699 * later on mustn't fail ... 701 * later on mustn't fail ...
700 */ 702 */
701 struct intel_guc *guc = &request->i915->guc; 703 ret = i915_guc_wq_check_space(request);
702
703 ret = i915_guc_wq_check_space(guc->execbuf_client);
704 if (ret) 704 if (ret)
705 return ret; 705 return ret;
706 } 706 }
@@ -713,12 +713,12 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
713 if (ret) 713 if (ret)
714 goto err_unpin; 714 goto err_unpin;
715 715
716 if (!request->ctx->engine[engine->id].initialised) { 716 if (!ce->initialised) {
717 ret = engine->init_context(request); 717 ret = engine->init_context(request);
718 if (ret) 718 if (ret)
719 goto err_unpin; 719 goto err_unpin;
720 720
721 request->ctx->engine[engine->id].initialised = true; 721 ce->initialised = true;
722 } 722 }
723 723
724 /* Note that after this point, we have committed to using 724 /* Note that after this point, we have committed to using
@@ -749,7 +749,6 @@ static int
749intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) 749intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
750{ 750{
751 struct intel_ringbuffer *ringbuf = request->ringbuf; 751 struct intel_ringbuffer *ringbuf = request->ringbuf;
752 struct drm_i915_private *dev_priv = request->i915;
753 struct intel_engine_cs *engine = request->engine; 752 struct intel_engine_cs *engine = request->engine;
754 753
755 intel_logical_ring_advance(ringbuf); 754 intel_logical_ring_advance(ringbuf);
@@ -777,8 +776,8 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
777 request->previous_context = engine->last_context; 776 request->previous_context = engine->last_context;
778 engine->last_context = request->ctx; 777 engine->last_context = request->ctx;
779 778
780 if (dev_priv->guc.execbuf_client) 779 if (i915.enable_guc_submission)
781 i915_guc_submit(dev_priv->guc.execbuf_client, request); 780 i915_guc_submit(request);
782 else 781 else
783 execlists_context_queue(request); 782 execlists_context_queue(request);
784 783
@@ -934,28 +933,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
934 return 0; 933 return 0;
935} 934}
936 935
937static int intel_lr_context_pin(struct intel_context *ctx, 936static int intel_lr_context_pin(struct i915_gem_context *ctx,
938 struct intel_engine_cs *engine) 937 struct intel_engine_cs *engine)
939{ 938{
940 struct drm_i915_private *dev_priv = ctx->i915; 939 struct drm_i915_private *dev_priv = ctx->i915;
941 struct drm_i915_gem_object *ctx_obj; 940 struct intel_context *ce = &ctx->engine[engine->id];
942 struct intel_ringbuffer *ringbuf;
943 void *vaddr; 941 void *vaddr;
944 u32 *lrc_reg_state; 942 u32 *lrc_reg_state;
945 int ret; 943 int ret;
946 944
947 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 945 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
948 946
949 if (ctx->engine[engine->id].pin_count++) 947 if (ce->pin_count++)
950 return 0; 948 return 0;
951 949
952 ctx_obj = ctx->engine[engine->id].state; 950 ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
953 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 951 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
954 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
955 if (ret) 952 if (ret)
956 goto err; 953 goto err;
957 954
958 vaddr = i915_gem_object_pin_map(ctx_obj); 955 vaddr = i915_gem_object_pin_map(ce->state);
959 if (IS_ERR(vaddr)) { 956 if (IS_ERR(vaddr)) {
960 ret = PTR_ERR(vaddr); 957 ret = PTR_ERR(vaddr);
961 goto unpin_ctx_obj; 958 goto unpin_ctx_obj;
@@ -963,17 +960,17 @@ static int intel_lr_context_pin(struct intel_context *ctx,
963 960
964 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 961 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
965 962
966 ringbuf = ctx->engine[engine->id].ringbuf; 963 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
967 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
968 if (ret) 964 if (ret)
969 goto unpin_map; 965 goto unpin_map;
970 966
971 i915_gem_context_reference(ctx); 967 i915_gem_context_reference(ctx);
972 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); 968 ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
973 intel_lr_context_descriptor_update(ctx, engine); 969 intel_lr_context_descriptor_update(ctx, engine);
974 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; 970
975 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; 971 lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
976 ctx_obj->dirty = true; 972 ce->lrc_reg_state = lrc_reg_state;
973 ce->state->dirty = true;
977 974
978 /* Invalidate GuC TLB. */ 975 /* Invalidate GuC TLB. */
979 if (i915.enable_guc_submission) 976 if (i915.enable_guc_submission)
@@ -982,34 +979,33 @@ static int intel_lr_context_pin(struct intel_context *ctx,
982 return 0; 979 return 0;
983 980
984unpin_map: 981unpin_map:
985 i915_gem_object_unpin_map(ctx_obj); 982 i915_gem_object_unpin_map(ce->state);
986unpin_ctx_obj: 983unpin_ctx_obj:
987 i915_gem_object_ggtt_unpin(ctx_obj); 984 i915_gem_object_ggtt_unpin(ce->state);
988err: 985err:
989 ctx->engine[engine->id].pin_count = 0; 986 ce->pin_count = 0;
990 return ret; 987 return ret;
991} 988}
992 989
993void intel_lr_context_unpin(struct intel_context *ctx, 990void intel_lr_context_unpin(struct i915_gem_context *ctx,
994 struct intel_engine_cs *engine) 991 struct intel_engine_cs *engine)
995{ 992{
996 struct drm_i915_gem_object *ctx_obj; 993 struct intel_context *ce = &ctx->engine[engine->id];
997 994
998 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 995 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
999 GEM_BUG_ON(ctx->engine[engine->id].pin_count == 0); 996 GEM_BUG_ON(ce->pin_count == 0);
1000 997
1001 if (--ctx->engine[engine->id].pin_count) 998 if (--ce->pin_count)
1002 return; 999 return;
1003 1000
1004 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); 1001 intel_unpin_ringbuffer_obj(ce->ringbuf);
1005 1002
1006 ctx_obj = ctx->engine[engine->id].state; 1003 i915_gem_object_unpin_map(ce->state);
1007 i915_gem_object_unpin_map(ctx_obj); 1004 i915_gem_object_ggtt_unpin(ce->state);
1008 i915_gem_object_ggtt_unpin(ctx_obj);
1009 1005
1010 ctx->engine[engine->id].lrc_vma = NULL; 1006 ce->lrc_vma = NULL;
1011 ctx->engine[engine->id].lrc_desc = 0; 1007 ce->lrc_desc = 0;
1012 ctx->engine[engine->id].lrc_reg_state = NULL; 1008 ce->lrc_reg_state = NULL;
1013 1009
1014 i915_gem_context_unreference(ctx); 1010 i915_gem_context_unreference(ctx);
1015} 1011}
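Both functions follow the classic lazy-refcount shape that the new local `ce` pointer makes readable: only the 0-to-1 pin does the expensive mapping work, and only the 1-to-0 unpin undoes it. A tiny standalone model of that shape, with the GGTT pin, kmap, and ringbuffer pin collapsed into one flag:

#include <stdio.h>

struct ctx_engine {
	int pin_count;
	int mapped;	/* stands in for the pin + map + ringbuffer setup */
};

static int pin(struct ctx_engine *ce)
{
	if (ce->pin_count++)
		return 0;	/* already resident: just bump the count */
	ce->mapped = 1;		/* first user: do the real work */
	return 0;
}

static void unpin(struct ctx_engine *ce)
{
	if (--ce->pin_count)
		return;		/* still in use elsewhere */
	ce->mapped = 0;		/* last user gone: tear everything down */
}

int main(void)
{
	struct ctx_engine ce = { 0 };

	pin(&ce); pin(&ce); unpin(&ce);
	printf("pin_count=%d mapped=%d\n", ce.pin_count, ce.mapped); /* 1 1 */
	unpin(&ce);
	printf("pin_count=%d mapped=%d\n", ce.pin_count, ce.mapped); /* 0 0 */
	return 0;
}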
@@ -2051,7 +2047,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
2051static int 2047static int
2052logical_ring_init(struct intel_engine_cs *engine) 2048logical_ring_init(struct intel_engine_cs *engine)
2053{ 2049{
2054 struct intel_context *dctx = engine->i915->kernel_context; 2050 struct i915_gem_context *dctx = engine->i915->kernel_context;
2055 int ret; 2051 int ret;
2056 2052
2057 ret = i915_cmd_parser_init_ring(engine); 2053 ret = i915_cmd_parser_init_ring(engine);
@@ -2275,7 +2271,7 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2275} 2271}
2276 2272
2277static int 2273static int
2278populate_lr_context(struct intel_context *ctx, 2274populate_lr_context(struct i915_gem_context *ctx,
2279 struct drm_i915_gem_object *ctx_obj, 2275 struct drm_i915_gem_object *ctx_obj,
2280 struct intel_engine_cs *engine, 2276 struct intel_engine_cs *engine,
2281 struct intel_ringbuffer *ringbuf) 2277 struct intel_ringbuffer *ringbuf)
@@ -2416,31 +2412,6 @@ populate_lr_context(struct intel_context *ctx,
2416} 2412}
2417 2413
2418/** 2414/**
2419 * intel_lr_context_free() - free the LRC specific bits of a context
2420 * @ctx: the LR context to free.
2421 *
2422 * The real context freeing is done in i915_gem_context_free: this only
2423 * takes care of the bits that are LRC related: the per-engine backing
2424 * objects and the logical ringbuffer.
2425 */
2426void intel_lr_context_free(struct intel_context *ctx)
2427{
2428 int i;
2429
2430 for (i = I915_NUM_ENGINES; --i >= 0; ) {
2431 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
2432 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
2433
2434 if (!ctx_obj)
2435 continue;
2436
2437 WARN_ON(ctx->engine[i].pin_count);
2438 intel_ringbuffer_free(ringbuf);
2439 drm_gem_object_unreference(&ctx_obj->base);
2440 }
2441}
2442
2443/**
2444 * intel_lr_context_size() - return the size of the context for an engine 2415 * intel_lr_context_size() - return the size of the context for an engine
2445 * @ring: which engine to find the context size for 2416 * @ring: which engine to find the context size for
2446 * 2417 *
@@ -2491,16 +2462,16 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2491 * 2462 *
2492 * Return: non-zero on error. 2463 * Return: non-zero on error.
2493 */ 2464 */
2494static int execlists_context_deferred_alloc(struct intel_context *ctx, 2465static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2495 struct intel_engine_cs *engine) 2466 struct intel_engine_cs *engine)
2496{ 2467{
2497 struct drm_i915_gem_object *ctx_obj; 2468 struct drm_i915_gem_object *ctx_obj;
2469 struct intel_context *ce = &ctx->engine[engine->id];
2498 uint32_t context_size; 2470 uint32_t context_size;
2499 struct intel_ringbuffer *ringbuf; 2471 struct intel_ringbuffer *ringbuf;
2500 int ret; 2472 int ret;
2501 2473
2502 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); 2474 WARN_ON(ce->state);
2503 WARN_ON(ctx->engine[engine->id].state);
2504 2475
2505 context_size = round_up(intel_lr_context_size(engine), 4096); 2476 context_size = round_up(intel_lr_context_size(engine), 4096);
2506 2477
@@ -2525,9 +2496,9 @@ static int execlists_context_deferred_alloc(struct intel_context *ctx,
2525 goto error_ringbuf; 2496 goto error_ringbuf;
2526 } 2497 }
2527 2498
2528 ctx->engine[engine->id].ringbuf = ringbuf; 2499 ce->ringbuf = ringbuf;
2529 ctx->engine[engine->id].state = ctx_obj; 2500 ce->state = ctx_obj;
2530 ctx->engine[engine->id].initialised = engine->init_context == NULL; 2501 ce->initialised = engine->init_context == NULL;
2531 2502
2532 return 0; 2503 return 0;
2533 2504
@@ -2535,21 +2506,19 @@ error_ringbuf:
2535 intel_ringbuffer_free(ringbuf); 2506 intel_ringbuffer_free(ringbuf);
2536error_deref_obj: 2507error_deref_obj:
2537 drm_gem_object_unreference(&ctx_obj->base); 2508 drm_gem_object_unreference(&ctx_obj->base);
2538 ctx->engine[engine->id].ringbuf = NULL; 2509 ce->ringbuf = NULL;
2539 ctx->engine[engine->id].state = NULL; 2510 ce->state = NULL;
2540 return ret; 2511 return ret;
2541} 2512}
2542 2513
2543void intel_lr_context_reset(struct drm_i915_private *dev_priv, 2514void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2544 struct intel_context *ctx) 2515 struct i915_gem_context *ctx)
2545{ 2516{
2546 struct intel_engine_cs *engine; 2517 struct intel_engine_cs *engine;
2547 2518
2548 for_each_engine(engine, dev_priv) { 2519 for_each_engine(engine, dev_priv) {
2549 struct drm_i915_gem_object *ctx_obj = 2520 struct intel_context *ce = &ctx->engine[engine->id];
2550 ctx->engine[engine->id].state; 2521 struct drm_i915_gem_object *ctx_obj = ce->state;
2551 struct intel_ringbuffer *ringbuf =
2552 ctx->engine[engine->id].ringbuf;
2553 void *vaddr; 2522 void *vaddr;
2554 uint32_t *reg_state; 2523 uint32_t *reg_state;
2555 2524
@@ -2568,7 +2537,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2568 2537
2569 i915_gem_object_unpin_map(ctx_obj); 2538 i915_gem_object_unpin_map(ctx_obj);
2570 2539
2571 ringbuf->head = 0; 2540 ce->ringbuf->head = 0;
2572 ringbuf->tail = 0; 2541 ce->ringbuf->tail = 0;
2573 } 2542 }
2574} 2543}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 1afba0331dc6..a8db42a9c50f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -99,16 +99,17 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
99#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) 99#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
100#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) 100#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
101 101
102void intel_lr_context_free(struct intel_context *ctx); 102struct i915_gem_context;
103
103uint32_t intel_lr_context_size(struct intel_engine_cs *engine); 104uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
104void intel_lr_context_unpin(struct intel_context *ctx, 105void intel_lr_context_unpin(struct i915_gem_context *ctx,
105 struct intel_engine_cs *engine); 106 struct intel_engine_cs *engine);
106 107
107struct drm_i915_private; 108struct drm_i915_private;
108 109
109void intel_lr_context_reset(struct drm_i915_private *dev_priv, 110void intel_lr_context_reset(struct drm_i915_private *dev_priv,
110 struct intel_context *ctx); 111 struct i915_gem_context *ctx);
111uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 112uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
112 struct intel_engine_cs *engine); 113 struct intel_engine_cs *engine);
113 114
114/* Execlists */ 115/* Execlists */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d65fd945607a..62eaa895fe5b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -978,7 +978,7 @@ void intel_lvds_init(struct drm_device *dev)
978 DRM_MODE_CONNECTOR_LVDS); 978 DRM_MODE_CONNECTOR_LVDS);
979 979
980 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, 980 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
981 DRM_MODE_ENCODER_LVDS, NULL); 981 DRM_MODE_ENCODER_LVDS, "LVDS");
982 982
983 intel_encoder->enable = intel_enable_lvds; 983 intel_encoder->enable = intel_enable_lvds;
984 intel_encoder->pre_enable = intel_pre_enable_lvds; 984 intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev)
1082 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); 1082 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
1083 if (fixed_mode) { 1083 if (fixed_mode) {
1084 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 1084 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1085 connector->display_info.width_mm = fixed_mode->width_mm;
1086 connector->display_info.height_mm = fixed_mode->height_mm;
1085 goto out; 1087 goto out;
1086 } 1088 }
1087 } 1089 }
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 8347fd8af8e4..f6d8a21d2c49 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -240,10 +240,11 @@ struct opregion_asle_ext {
240 240
241#define MAX_DSLP 1500 241#define MAX_DSLP 1500
242 242
243static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) 243static int swsci(struct drm_i915_private *dev_priv,
244 u32 function, u32 parm, u32 *parm_out)
244{ 245{
245 struct drm_i915_private *dev_priv = dev->dev_private;
246 struct opregion_swsci *swsci = dev_priv->opregion.swsci; 246 struct opregion_swsci *swsci = dev_priv->opregion.swsci;
247 struct pci_dev *pdev = dev_priv->dev->pdev;
247 u32 main_function, sub_function, scic; 248 u32 main_function, sub_function, scic;
248 u16 swsci_val; 249 u16 swsci_val;
249 u32 dslp; 250 u32 dslp;
@@ -293,16 +294,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
293 swsci->scic = scic; 294 swsci->scic = scic;
294 295
295 /* Ensure SCI event is selected and event trigger is cleared. */ 296 /* Ensure SCI event is selected and event trigger is cleared. */
296 pci_read_config_word(dev->pdev, SWSCI, &swsci_val); 297 pci_read_config_word(pdev, SWSCI, &swsci_val);
297 if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) { 298 if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
298 swsci_val |= SWSCI_SCISEL; 299 swsci_val |= SWSCI_SCISEL;
299 swsci_val &= ~SWSCI_GSSCIE; 300 swsci_val &= ~SWSCI_GSSCIE;
300 pci_write_config_word(dev->pdev, SWSCI, swsci_val); 301 pci_write_config_word(pdev, SWSCI, swsci_val);
301 } 302 }
302 303
303 /* Use event trigger to tell bios to check the mail. */ 304 /* Use event trigger to tell bios to check the mail. */
304 swsci_val |= SWSCI_GSSCIE; 305 swsci_val |= SWSCI_GSSCIE;
305 pci_write_config_word(dev->pdev, SWSCI, swsci_val); 306 pci_write_config_word(pdev, SWSCI, swsci_val);
306 307
307 /* Poll for the result. */ 308 /* Poll for the result. */
308#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) 309#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -336,13 +337,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
336int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 337int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
337 bool enable) 338 bool enable)
338{ 339{
339 struct drm_device *dev = intel_encoder->base.dev; 340 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
340 u32 parm = 0; 341 u32 parm = 0;
341 u32 type = 0; 342 u32 type = 0;
342 u32 port; 343 u32 port;
343 344
344 /* don't care about old stuff for now */ 345 /* don't care about old stuff for now */
345 if (!HAS_DDI(dev)) 346 if (!HAS_DDI(dev_priv))
346 return 0; 347 return 0;
347 348
348 if (intel_encoder->type == INTEL_OUTPUT_DSI) 349 if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -382,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
382 383
383 parm |= type << (16 + port * 3); 384 parm |= type << (16 + port * 3);
384 385
385 return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); 386 return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
386} 387}
387 388
388static const struct { 389static const struct {
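The parameter word built just above packs a 3-bit display-power state per port starting at bit 16, which is why the expression is `type << (16 + port * 3)`. A quick numeric check of that layout; the port and type values are examples, not the full SWSCI encoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t parm = 0;
	uint32_t port = 2, type = 5;	/* 3-bit type for the third port */

	parm |= type << (16 + port * 3);

	printf("parm=0x%08x (type %u at bits %u-%u)\n",
	       parm, type, 16 + port * 3, 16 + port * 3 + 2);
	return 0;
}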
@@ -396,27 +397,28 @@ static const struct {
396 { PCI_D3cold, 0x04 }, 397 { PCI_D3cold, 0x04 },
397}; 398};
398 399
399int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 400int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
401 pci_power_t state)
400{ 402{
401 int i; 403 int i;
402 404
403 if (!HAS_DDI(dev)) 405 if (!HAS_DDI(dev_priv))
404 return 0; 406 return 0;
405 407
406 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) { 408 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
407 if (state == power_state_map[i].pci_power_state) 409 if (state == power_state_map[i].pci_power_state)
408 return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE, 410 return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
409 power_state_map[i].parm, NULL); 411 power_state_map[i].parm, NULL);
410 } 412 }
411 413
412 return -EINVAL; 414 return -EINVAL;
413} 415}
414 416
415static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 417static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
416{ 418{
417 struct drm_i915_private *dev_priv = dev->dev_private;
418 struct intel_connector *connector; 419 struct intel_connector *connector;
419 struct opregion_asle *asle = dev_priv->opregion.asle; 420 struct opregion_asle *asle = dev_priv->opregion.asle;
421 struct drm_device *dev = dev_priv->dev;
420 422
421 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 423 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
422 424
@@ -449,7 +451,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
449 return 0; 451 return 0;
450} 452}
451 453
452static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) 454static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
453{ 455{
454 /* alsi is the current ALS reading in lux. 0 indicates below sensor 456 /* alsi is the current ALS reading in lux. 0 indicates below sensor
455 range, 0xffff indicates above sensor range. 1-0xfffe are valid */ 457 range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -457,13 +459,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
457 return ASLC_ALS_ILLUM_FAILED; 459 return ASLC_ALS_ILLUM_FAILED;
458} 460}
459 461
460static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) 462static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
461{ 463{
462 DRM_DEBUG_DRIVER("PWM freq is not supported\n"); 464 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
463 return ASLC_PWM_FREQ_FAILED; 465 return ASLC_PWM_FREQ_FAILED;
464} 466}
465 467
466static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) 468static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
467{ 469{
468 /* Panel fitting is currently controlled by the X code, so this is a 470 /* Panel fitting is currently controlled by the X code, so this is a
469 noop until modesetting support works fully */ 471 noop until modesetting support works fully */
@@ -471,13 +473,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
471 return ASLC_PFIT_FAILED; 473 return ASLC_PFIT_FAILED;
472} 474}
473 475
474static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot) 476static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
475{ 477{
476 DRM_DEBUG_DRIVER("SROT is not supported\n"); 478 DRM_DEBUG_DRIVER("SROT is not supported\n");
477 return ASLC_ROTATION_ANGLES_FAILED; 479 return ASLC_ROTATION_ANGLES_FAILED;
478} 480}
479 481
480static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) 482static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
481{ 483{
482 if (!iuer) 484 if (!iuer)
483 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n"); 485 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -495,7 +497,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
495 return ASLC_BUTTON_ARRAY_FAILED; 497 return ASLC_BUTTON_ARRAY_FAILED;
496} 498}
497 499
498static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) 500static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
499{ 501{
500 if (iuer & ASLE_IUER_CONVERTIBLE) 502 if (iuer & ASLE_IUER_CONVERTIBLE)
501 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n"); 503 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -505,7 +507,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
505 return ASLC_CONVERTIBLE_FAILED; 507 return ASLC_CONVERTIBLE_FAILED;
506} 508}
507 509
508static u32 asle_set_docking(struct drm_device *dev, u32 iuer) 510static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
509{ 511{
510 if (iuer & ASLE_IUER_DOCKING) 512 if (iuer & ASLE_IUER_DOCKING)
511 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n"); 513 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -515,7 +517,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
515 return ASLC_DOCKING_FAILED; 517 return ASLC_DOCKING_FAILED;
516} 518}
517 519
518static u32 asle_isct_state(struct drm_device *dev) 520static u32 asle_isct_state(struct drm_i915_private *dev_priv)
519{ 521{
520 DRM_DEBUG_DRIVER("ISCT is not supported\n"); 522 DRM_DEBUG_DRIVER("ISCT is not supported\n");
521 return ASLC_ISCT_STATE_FAILED; 523 return ASLC_ISCT_STATE_FAILED;
@@ -527,7 +529,6 @@ static void asle_work(struct work_struct *work)
527 container_of(work, struct intel_opregion, asle_work); 529 container_of(work, struct intel_opregion, asle_work);
528 struct drm_i915_private *dev_priv = 530 struct drm_i915_private *dev_priv =
529 container_of(opregion, struct drm_i915_private, opregion); 531 container_of(opregion, struct drm_i915_private, opregion);
530 struct drm_device *dev = dev_priv->dev;
531 struct opregion_asle *asle = dev_priv->opregion.asle; 532 struct opregion_asle *asle = dev_priv->opregion.asle;
532 u32 aslc_stat = 0; 533 u32 aslc_stat = 0;
533 u32 aslc_req; 534 u32 aslc_req;
@@ -544,32 +545,32 @@ static void asle_work(struct work_struct *work)
544 } 545 }
545 546
546 if (aslc_req & ASLC_SET_ALS_ILLUM) 547 if (aslc_req & ASLC_SET_ALS_ILLUM)
547 aslc_stat |= asle_set_als_illum(dev, asle->alsi); 548 aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
548 549
549 if (aslc_req & ASLC_SET_BACKLIGHT) 550 if (aslc_req & ASLC_SET_BACKLIGHT)
550 aslc_stat |= asle_set_backlight(dev, asle->bclp); 551 aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
551 552
552 if (aslc_req & ASLC_SET_PFIT) 553 if (aslc_req & ASLC_SET_PFIT)
553 aslc_stat |= asle_set_pfit(dev, asle->pfit); 554 aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
554 555
555 if (aslc_req & ASLC_SET_PWM_FREQ) 556 if (aslc_req & ASLC_SET_PWM_FREQ)
556 aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb); 557 aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
557 558
558 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) 559 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
559 aslc_stat |= asle_set_supported_rotation_angles(dev, 560 aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
560 asle->srot); 561 asle->srot);
561 562
562 if (aslc_req & ASLC_BUTTON_ARRAY) 563 if (aslc_req & ASLC_BUTTON_ARRAY)
563 aslc_stat |= asle_set_button_array(dev, asle->iuer); 564 aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
564 565
565 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR) 566 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
566 aslc_stat |= asle_set_convertible(dev, asle->iuer); 567 aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
567 568
568 if (aslc_req & ASLC_DOCKING_INDICATOR) 569 if (aslc_req & ASLC_DOCKING_INDICATOR)
569 aslc_stat |= asle_set_docking(dev, asle->iuer); 570 aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
570 571
571 if (aslc_req & ASLC_ISCT_STATE_CHANGE) 572 if (aslc_req & ASLC_ISCT_STATE_CHANGE)
572 aslc_stat |= asle_isct_state(dev); 573 aslc_stat |= asle_isct_state(dev_priv);
573 574
574 asle->aslc = aslc_stat; 575 asle->aslc = aslc_stat;
575} 576}
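asle_work() is a bitmask dispatcher: each request bit selects one handler, and the handlers return status bits that are OR-ed into aslc_stat. A table-driven model of the same shape; the bit positions, handlers, and return codes here are invented for the demo:

#include <stdint.h>
#include <stdio.h>

static uint32_t set_backlight(uint32_t arg) { return arg ? 0 : 0x2; }
static uint32_t set_pfit(uint32_t arg)      { (void)arg; return 0x4; }

int main(void)
{
	struct { uint32_t req_bit; uint32_t (*fn)(uint32_t); uint32_t arg; }
	table[] = {
		{ 1u << 1, set_backlight, 200 },
		{ 1u << 2, set_pfit,      0   },
	};
	uint32_t aslc_req = (1u << 1) | (1u << 2);
	uint32_t aslc_stat = 0;

	for (unsigned i = 0; i < 2; i++)
		if (aslc_req & table[i].req_bit)
			aslc_stat |= table[i].fn(table[i].arg);

	printf("aslc_stat=0x%x\n", aslc_stat);	/* pfit unsupported: 0x4 */
	return 0;
}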
@@ -656,10 +657,10 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
656 } 657 }
657} 658}
658 659
659static void intel_didl_outputs(struct drm_device *dev) 660static void intel_didl_outputs(struct drm_i915_private *dev_priv)
660{ 661{
661 struct drm_i915_private *dev_priv = dev->dev_private;
662 struct intel_opregion *opregion = &dev_priv->opregion; 662 struct intel_opregion *opregion = &dev_priv->opregion;
663 struct pci_dev *pdev = dev_priv->dev->pdev;
663 struct drm_connector *connector; 664 struct drm_connector *connector;
664 acpi_handle handle; 665 acpi_handle handle;
665 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; 666 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -668,7 +669,7 @@ static void intel_didl_outputs(struct drm_device *dev)
668 u32 temp, max_outputs; 669 u32 temp, max_outputs;
669 int i = 0; 670 int i = 0;
670 671
671 handle = ACPI_HANDLE(&dev->pdev->dev); 672 handle = ACPI_HANDLE(&pdev->dev);
672 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) 673 if (!handle || acpi_bus_get_device(handle, &acpi_dev))
673 return; 674 return;
674 675
@@ -723,7 +724,7 @@ end:
723 724
724blind_set: 725blind_set:
725 i = 0; 726 i = 0;
726 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 727 list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) {
727 int output_type = ACPI_OTHER_OUTPUT; 728 int output_type = ACPI_OTHER_OUTPUT;
728 if (i >= max_outputs) { 729 if (i >= max_outputs) {
729 DRM_DEBUG_KMS("More than %u outputs in connector list\n", 730 DRM_DEBUG_KMS("More than %u outputs in connector list\n",
@@ -759,9 +760,8 @@ blind_set:
759 goto end; 760 goto end;
760} 761}
761 762
762static void intel_setup_cadls(struct drm_device *dev) 763static void intel_setup_cadls(struct drm_i915_private *dev_priv)
763{ 764{
764 struct drm_i915_private *dev_priv = dev->dev_private;
765 struct intel_opregion *opregion = &dev_priv->opregion; 765 struct intel_opregion *opregion = &dev_priv->opregion;
766 int i = 0; 766 int i = 0;
767 u32 disp_id; 767 u32 disp_id;
@@ -778,17 +778,16 @@ static void intel_setup_cadls(struct drm_device *dev)
778 } while (++i < 8 && disp_id != 0); 778 } while (++i < 8 && disp_id != 0);
779} 779}
780 780
781void intel_opregion_init(struct drm_device *dev) 781void intel_opregion_register(struct drm_i915_private *dev_priv)
782{ 782{
783 struct drm_i915_private *dev_priv = dev->dev_private;
784 struct intel_opregion *opregion = &dev_priv->opregion; 783 struct intel_opregion *opregion = &dev_priv->opregion;
785 784
786 if (!opregion->header) 785 if (!opregion->header)
787 return; 786 return;
788 787
789 if (opregion->acpi) { 788 if (opregion->acpi) {
790 intel_didl_outputs(dev); 789 intel_didl_outputs(dev_priv);
791 intel_setup_cadls(dev); 790 intel_setup_cadls(dev_priv);
792 791
793 /* Notify BIOS we are ready to handle ACPI video ext notifs. 792 /* Notify BIOS we are ready to handle ACPI video ext notifs.
794 * Right now, all the events are handled by the ACPI video module. 793 * Right now, all the events are handled by the ACPI video module.
@@ -806,9 +805,8 @@ void intel_opregion_init(struct drm_device *dev)
806 } 805 }
807} 806}
808 807
809void intel_opregion_fini(struct drm_device *dev) 808void intel_opregion_unregister(struct drm_i915_private *dev_priv)
810{ 809{
811 struct drm_i915_private *dev_priv = dev->dev_private;
812 struct intel_opregion *opregion = &dev_priv->opregion; 810 struct intel_opregion *opregion = &dev_priv->opregion;
813 811
814 if (!opregion->header) 812 if (!opregion->header)
@@ -840,9 +838,8 @@ void intel_opregion_fini(struct drm_device *dev)
840 opregion->lid_state = NULL; 838 opregion->lid_state = NULL;
841} 839}
842 840
843static void swsci_setup(struct drm_device *dev) 841static void swsci_setup(struct drm_i915_private *dev_priv)
844{ 842{
845 struct drm_i915_private *dev_priv = dev->dev_private;
846 struct intel_opregion *opregion = &dev_priv->opregion; 843 struct intel_opregion *opregion = &dev_priv->opregion;
847 bool requested_callbacks = false; 844 bool requested_callbacks = false;
848 u32 tmp; 845 u32 tmp;
@@ -852,7 +849,7 @@ static void swsci_setup(struct drm_device *dev)
852 opregion->swsci_sbcb_sub_functions = 1; 849 opregion->swsci_sbcb_sub_functions = 1;
853 850
854 /* We use GBDA to ask for supported GBDA calls. */ 851 /* We use GBDA to ask for supported GBDA calls. */
855 if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { 852 if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
856 /* make the bits match the sub-function codes */ 853 /* make the bits match the sub-function codes */
857 tmp <<= 1; 854 tmp <<= 1;
858 opregion->swsci_gbda_sub_functions |= tmp; 855 opregion->swsci_gbda_sub_functions |= tmp;
@@ -863,7 +860,7 @@ static void swsci_setup(struct drm_device *dev)
 	 * must not call interfaces that are not specifically requested by the
 	 * bios.
 	 */
-	if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+	if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
 		/* here, the bits already match sub-function codes */
 		opregion->swsci_sbcb_sub_functions |= tmp;
 		requested_callbacks = true;
@@ -874,7 +871,7 @@ static void swsci_setup(struct drm_device *dev)
 	 * the callback is _requested_. But we still can't call interfaces that
 	 * are not requested.
 	 */
-	if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+	if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
 		/* make the bits match the sub-function codes */
 		u32 low = tmp & 0x7ff;
 		u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -916,10 +913,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
 	{ }
 };
 
-int intel_opregion_setup(struct drm_device *dev)
+int intel_opregion_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
 	u32 asls, mboxes;
 	char buf[sizeof(OPREGION_SIGNATURE)];
 	int err = 0;
@@ -931,7 +928,7 @@ int intel_opregion_setup(struct drm_device *dev)
 	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
 	BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
 
-	pci_read_config_dword(dev->pdev, ASLS, &asls);
+	pci_read_config_dword(pdev, ASLS, &asls);
 	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
 		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
@@ -963,7 +960,7 @@ int intel_opregion_setup(struct drm_device *dev)
 	if (mboxes & MBOX_SWSCI) {
 		DRM_DEBUG_DRIVER("SWSCI supported\n");
 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
-		swsci_setup(dev);
+		swsci_setup(dev_priv);
 	}
 
 	if (mboxes & MBOX_ASLE) {
@@ -1012,12 +1009,12 @@ err_out:
 }
 
 int
-intel_opregion_get_panel_type(struct drm_device *dev)
+intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 {
 	u32 panel_details;
 	int ret;
 
-	ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+	ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
 	if (ret) {
 		DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
 			      ret);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 29bdd79d9039..08274591db7e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
  */
 
 #include <linux/cpufreq.h>
+#include <drm/drm_plane_helper.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
@@ -2949,6 +2950,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 	}
 }
 
+/*
+ * Determines the downscale amount of a plane for the purposes of watermark calculations.
+ * The bspec defines downscale amount as:
+ *
+ * """
+ * Horizontal down scale amount = maximum[1, Horizontal source size /
+ *                                           Horizontal destination size]
+ * Vertical down scale amount = maximum[1, Vertical source size /
+ *                                         Vertical destination size]
+ * Total down scale amount = Horizontal down scale amount *
+ *                           Vertical down scale amount
+ * """
+ *
+ * Return value is provided in 16.16 fixed point form to retain fractional part.
+ * Caller should take care of dividing & rounding off the value.
+ */
+static uint32_t
+skl_plane_downscale_amount(const struct intel_plane_state *pstate)
+{
+	uint32_t downscale_h, downscale_w;
+	uint32_t src_w, src_h, dst_w, dst_h;
+
+	if (WARN_ON(!pstate->visible))
+		return DRM_PLANE_HELPER_NO_SCALING;
+
+	/* n.b., src is 16.16 fixed point, dst is whole integer */
+	src_w = drm_rect_width(&pstate->src);
+	src_h = drm_rect_height(&pstate->src);
+	dst_w = drm_rect_width(&pstate->dst);
+	dst_h = drm_rect_height(&pstate->dst);
+	if (intel_rotation_90_or_270(pstate->base.rotation))
+		swap(dst_w, dst_h);
+
+	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+
+	/* Provide result in 16.16 fixed point */
+	return (uint64_t)downscale_w * downscale_h >> 16;
+}
+
 static unsigned int
 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 			     const struct drm_plane_state *pstate,
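The 16.16 arithmetic above keeps the integer part in the high 16 bits and the fraction in the low 16. A minimal standalone sketch of the same per-axis maximum[1, src/dst] rule, compilable outside the kernel; fp16_downscale() and the sample geometry are illustrative, not driver API (in the driver, src already arrives in 16.16 form):

	#include <stdint.h>
	#include <stdio.h>

	#define FP16_ONE (1u << 16)	/* 1.0 in 16.16 fixed point */

	/* max(1.0, src/dst) in 16.16 fixed point */
	static uint32_t fp16_downscale(uint32_t src, uint32_t dst)
	{
		uint32_t ratio = (uint32_t)(((uint64_t)src << 16) / dst);

		return ratio > FP16_ONE ? ratio : FP16_ONE;
	}

	int main(void)
	{
		/* 3840x2160 source rectangle scanned out into a 1920x1080 window */
		uint32_t w = fp16_downscale(3840, 1920);	/* 2.0 -> 0x20000 */
		uint32_t h = fp16_downscale(2160, 1080);	/* 2.0 -> 0x20000 */
		/* multiplying two 16.16 values yields 32.32; shift back to 16.16 */
		uint32_t total = (uint32_t)((uint64_t)w * h >> 16);

		printf("total downscale: %u + %u/65536\n", total >> 16, total & 0xffff);
		return 0;	/* prints: total downscale: 4 + 0/65536 */
	}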
@@ -2956,6 +2997,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 {
 	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
 	struct drm_framebuffer *fb = pstate->fb;
+	uint32_t down_scale_amount, data_rate;
 	uint32_t width = 0, height = 0;
 	unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
 
@@ -2975,15 +3017,19 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	/* for planar format */
 	if (format == DRM_FORMAT_NV12) {
 		if (y)	/* y-plane data rate */
-			return width * height *
-				drm_format_plane_cpp(format, 0);
+			data_rate = width * height *
+				drm_format_plane_cpp(format, 0);
 		else	/* uv-plane data rate */
-			return (width / 2) * (height / 2) *
-				drm_format_plane_cpp(format, 1);
+			data_rate = (width / 2) * (height / 2) *
+				drm_format_plane_cpp(format, 1);
+	} else {
+		/* for packed formats */
+		data_rate = width * height * drm_format_plane_cpp(format, 0);
 	}
 
-	/* for packed formats */
-	return width * height * drm_format_plane_cpp(format, 0);
+	down_scale_amount = skl_plane_downscale_amount(intel_pstate);
+
+	return (uint64_t)data_rate * down_scale_amount >> 16;
 }
 
 /*
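Worked through with numbers (values chosen for illustration): a 3840x2160 XRGB8888 plane at 4 bytes/pixel has a base rate of 3840 * 2160 * 4 = 33,177,600 bytes per frame. Downscaled into a 1920x1080 window, the 4.0 (0x40000) factor from skl_plane_downscale_amount() lifts the relative data rate used for DDB allocation to 132,710,400, reflecting that the plane fetches four source pixels for every screen pixel it produces; an unscaled plane multiplies by 1.0 and is unchanged.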
@@ -3042,6 +3088,69 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
 	return total_data_rate;
 }
 
+static uint16_t
+skl_ddb_min_alloc(const struct drm_plane_state *pstate,
+		  const int y)
+{
+	struct drm_framebuffer *fb = pstate->fb;
+	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+	uint32_t src_w, src_h;
+	uint32_t min_scanlines = 8;
+	uint8_t plane_bpp;
+
+	if (WARN_ON(!fb))
+		return 0;
+
+	/* For packed formats, no y-plane, return 0 */
+	if (y && fb->pixel_format != DRM_FORMAT_NV12)
+		return 0;
+
+	/* For Non Y-tile return 8-blocks */
+	if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
+	    fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
+		return 8;
+
+	src_w = drm_rect_width(&intel_pstate->src) >> 16;
+	src_h = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(pstate->rotation))
+		swap(src_w, src_h);
+
+	/* Halve UV plane width and height for NV12 */
+	if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
+		src_w /= 2;
+		src_h /= 2;
+	}
+
+	if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
+		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+	else
+		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+	if (intel_rotation_90_or_270(pstate->rotation)) {
+		switch (plane_bpp) {
+		case 1:
+			min_scanlines = 32;
+			break;
+		case 2:
+			min_scanlines = 16;
+			break;
+		case 4:
+			min_scanlines = 8;
+			break;
+		case 8:
+			min_scanlines = 4;
+			break;
+		default:
+			WARN(1, "Unsupported pixel depth %u for rotation",
+			     plane_bpp);
+			min_scanlines = 32;
+		}
+	}
+
+	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
+}
+
 static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		      struct skl_ddb_allocation *ddb /* out */)
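As a sanity check of the final formula, with illustrative inputs: an unrotated Y-tiled XRGB8888 plane 1920 pixels wide gives DIV_ROUND_UP(4 * 1920 * 4, 512) = 60, times 8/4 scanlines plus 3 for a 123-block minimum; a rotated 16bpp plane takes min_scanlines = 16 from the switch instead, and anything linear or X-tiled short-circuits to the flat 8-block minimum above.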
@@ -3104,11 +3213,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 			continue;
 		}
 
-		minimum[id] = 8;
-		if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
-			y_minimum[id] = 8;
-		else
-			y_minimum[id] = 0;
+		minimum[id] = skl_ddb_min_alloc(pstate, 0);
+		y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
 	}
 
 	for (i = 0; i < PLANE_CURSOR; i++) {
@@ -3225,6 +3331,30 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
 	return ret;
 }
 
+static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
+					      struct intel_plane_state *pstate)
+{
+	uint64_t adjusted_pixel_rate;
+	uint64_t downscale_amount;
+	uint64_t pixel_rate;
+
+	/* Shouldn't reach here on disabled planes... */
+	if (WARN_ON(!pstate->visible))
+		return 0;
+
+	/*
+	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
+	 * with additional adjustments for plane-specific scaling.
+	 */
+	adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+	downscale_amount = skl_plane_downscale_amount(pstate);
+
+	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
+	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
+
+	return pixel_rate;
+}
+
 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				struct intel_crtc_state *cstate,
 				struct intel_plane_state *intel_pstate,
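In the same 16.16 scheme, with illustrative numbers: if skl_pipe_pixel_rate() returns 533,250 (a 4k60-class dotclock in kHz) and the plane's total downscale is 1.5x (0x18000), the adjusted rate is 533250 * 98304 >> 16 = 799,875. The clamp_t() WARN_ON merely flags the unexpected case where that product overflows the uint32_t the watermark callers consume.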
@@ -3243,6 +3373,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	uint32_t selected_result;
 	uint8_t cpp;
 	uint32_t width = 0, height = 0;
+	uint32_t plane_pixel_rate;
 
 	if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
 		*enabled = false;
@@ -3256,9 +3387,10 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		swap(width, height);
 
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
-				 cpp, latency);
-	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
+	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+
+	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
+	method2 = skl_wm_method2(plane_pixel_rate,
 				 cstate->base.adjusted_mode.crtc_htotal,
 				 width,
 				 cpp,
@@ -4046,7 +4178,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	skl_ddb_get_hw_state(dev_priv, ddb);
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -4059,23 +4190,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 		/* Easy/common case; just sanitize DDB now if everything off */
 		memset(ddb, 0, sizeof(*ddb));
 	}
-
-	/* Calculate plane data rates */
-	for_each_intel_crtc(dev, intel_crtc) {
-		struct intel_crtc_state *cstate = intel_crtc->config;
-		struct intel_plane *intel_plane;
-
-		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-			const struct drm_plane_state *pstate =
-				intel_plane->base.state;
-			int id = skl_wm_plane_id(intel_plane);
-
-			cstate->wm.skl.plane_data_rate[id] =
-				skl_plane_relative_data_rate(cstate, pstate, 0);
-			cstate->wm.skl.plane_y_data_rate[id] =
-				skl_plane_relative_data_rate(cstate, pstate, 1);
-		}
-	}
 }
 
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
@@ -5039,7 +5153,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 	for_each_engine(engine, dev_priv)
 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
-	if (HAS_GUC_UCODE(dev_priv))
+	if (HAS_GUC(dev_priv))
 		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 929e7b4af2a4..b33c876fed20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -119,7 +119,7 @@ struct intel_ringbuffer {
 	u32 last_retired_head;
 };
 
-struct intel_context;
+struct i915_gem_context;
 struct drm_i915_reg_table;
 
 /*
@@ -310,7 +310,7 @@ struct intel_engine_cs {
 
 	wait_queue_head_t irq_queue;
 
-	struct intel_context *last_context;
+	struct i915_gem_context *last_context;
 
 	struct intel_ring_hangcheck hangcheck;
 
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b69b935516fb..fe8faf30bda7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -806,15 +806,27 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
 }
 
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+	u32 tmp = I915_READ(DBUF_CTL);
+
+	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
+}
+
 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
 					  struct i915_power_well *power_well)
 {
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
-	if (IS_BROXTON(dev_priv)) {
-		broxton_cdclk_verify_state(dev_priv);
+	WARN_ON(dev_priv->cdclk_freq !=
+		dev_priv->display.get_display_clock_speed(dev_priv->dev));
+
+	gen9_assert_dbuf_enabled(dev_priv);
+
+	if (IS_BROXTON(dev_priv))
 		broxton_ddi_phy_verify_state(dev_priv);
-	}
 }
 
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -2176,6 +2188,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }
 
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+	POSTING_READ(DBUF_CTL);
+
+	udelay(10);
+
+	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+		DRM_ERROR("DBuf power enable timeout\n");
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+	POSTING_READ(DBUF_CTL);
+
+	udelay(10);
+
+	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+		DRM_ERROR("DBuf power disable timeout!\n");
+}
+
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
 				  bool resume)
 {
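Both helpers follow the request/ack idiom gen9 uses for power wells: set or clear the driver-owned request bit, flush the write with a posting read, wait a fixed 10us, then check the hardware-owned status bit. A self-contained sketch of that idiom, with a fake register variable standing in for the mmio-backed DBUF_CTL (bit positions as in i915_reg.h; a real device acks after the settle time rather than instantly):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DBUF_POWER_REQUEST (1u << 31)	/* driver-owned request bit */
	#define DBUF_POWER_STATE   (1u << 30)	/* hardware-owned ack bit */

	/* fake backing store; the driver does I915_READ/I915_WRITE on DBUF_CTL */
	static uint32_t dbuf_ctl;

	static uint32_t mmio_read(void) { return dbuf_ctl; }

	static void mmio_write(uint32_t val)
	{
		dbuf_ctl = val;
		/* model a part that acks instantly; real hardware needs time,
		 * hence the POSTING_READ + udelay(10) in the driver */
		if (val & DBUF_POWER_REQUEST)
			dbuf_ctl |= DBUF_POWER_STATE;
		else
			dbuf_ctl &= ~DBUF_POWER_STATE;
	}

	static bool dbuf_set_power(bool enable)
	{
		uint32_t val = mmio_read();

		mmio_write(enable ? val | DBUF_POWER_REQUEST
				  : val & ~DBUF_POWER_REQUEST);
		/* after the settle time, the ack bit must match the request */
		return !!(mmio_read() & DBUF_POWER_STATE) == enable;
	}

	int main(void)
	{
		printf("enable  acked: %d\n", dbuf_set_power(true));
		printf("disable acked: %d\n", dbuf_set_power(false));
		return 0;
	}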
@@ -2200,12 +2234,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 
 	mutex_unlock(&power_domains->lock);
 
-	if (!resume)
-		return;
-
 	skl_init_cdclk(dev_priv);
 
-	if (dev_priv->csr.dmc_payload)
+	gen9_dbuf_enable(dev_priv);
+
+	if (resume && dev_priv->csr.dmc_payload)
 		intel_csr_load_program(dev_priv);
 }
 
@@ -2216,6 +2249,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
+	gen9_dbuf_disable(dev_priv);
+
 	skl_uninit_cdclk(dev_priv);
 
 	/* The spec doesn't call for removing the reset handshake flag */
@@ -2260,9 +2295,11 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
 	mutex_unlock(&power_domains->lock);
 
 	broxton_init_cdclk(dev_priv);
+
+	gen9_dbuf_enable(dev_priv);
+
 	broxton_ddi_phy_init(dev_priv);
 
-	broxton_cdclk_verify_state(dev_priv);
 	broxton_ddi_phy_verify_state(dev_priv);
 
 	if (resume && dev_priv->csr.dmc_payload)
@@ -2277,6 +2314,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
 	broxton_ddi_phy_uninit(dev_priv);
+
+	gen9_dbuf_disable(dev_priv);
+
 	broxton_uninit_cdclk(dev_priv);
 
 	/* The spec doesn't call for removing the reset handshake flag */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2128fae5687d..1a71456bd12a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2981,7 +2981,7 @@ bool intel_sdvo_init(struct drm_device *dev,
 	intel_encoder = &intel_sdvo->base;
 	intel_encoder->type = INTEL_OUTPUT_SDVO;
 	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
-			 NULL);
+			 "SDVO %c", port_name(port));
 
 	/* Read the regs to test if we can talk to the device */
 	for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 97b1a54eb09f..324ccb06397d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1114,10 +1114,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 
 	possible_crtcs = (1 << pipe);
 
-	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
-				       &intel_plane_funcs,
-				       plane_formats, num_plane_formats,
-				       DRM_PLANE_TYPE_OVERLAY, NULL);
+	if (INTEL_INFO(dev)->gen >= 9)
+		ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+					       &intel_plane_funcs,
+					       plane_formats, num_plane_formats,
+					       DRM_PLANE_TYPE_OVERLAY,
+					       "plane %d%c", plane + 2, pipe_name(pipe));
+	else
+		ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+					       &intel_plane_funcs,
+					       plane_formats, num_plane_formats,
+					       DRM_PLANE_TYPE_OVERLAY,
+					       "sprite %c", sprite_name(pipe, plane));
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 223129d3c765..1f3a0e1e1a1f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1591,7 +1591,7 @@ intel_tv_init(struct drm_device *dev)
 			   DRM_MODE_CONNECTOR_SVIDEO);
 
 	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
-			 DRM_MODE_ENCODER_TVDAC, NULL);
+			 DRM_MODE_ENCODER_TVDAC, "TV");
 
 	intel_encoder->compute_config = intel_tv_compute_config;
 	intel_encoder->get_config = intel_tv_get_config;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 385114bca924..c1ca458d688e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1715,7 +1715,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv)
 	int ret;
 	unsigned long irqflags;
 
-	if (!i915.enable_guc_submission)
+	if (!HAS_GUC(dev_priv))
 		return -EINVAL;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 4f9799f025a9..68db9621f1f0 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -403,9 +403,10 @@ struct lvds_dvo_timing {
 	u8 vsync_off:4;
 	u8 rsvd0:6;
 	u8 hsync_off_hi:2;
-	u8 h_image;
-	u8 v_image;
-	u8 max_hv;
+	u8 himage_lo;
+	u8 vimage_lo;
+	u8 vimage_hi:4;
+	u8 himage_hi:4;
 	u8 h_border;
 	u8 v_border;
 	u8 rsvd1:3;
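The renamed fields matter because the old h_image/v_image/max_hv view hid the high bits: the DTD stores each image dimension as 12 bits, a low byte plus a shared high-nibble byte, the same packing EDID detailed timings use for physical size in mm. A hedged sketch of stitching the dimensions back together; the trimmed struct mirrors the corrected layout above, while the helper layout and sample values are illustrative rather than driver code:

	#include <stdint.h>
	#include <stdio.h>

	/* just the tail of the timing block that changed above */
	struct dvo_timing_image {
		uint8_t himage_lo;
		uint8_t vimage_lo;
		uint8_t vimage_hi:4;	/* the two high nibbles share one byte */
		uint8_t himage_hi:4;
	};

	int main(void)
	{
		/* e.g. a 344mm x 194mm panel: 344 = 0x158, 194 = 0x0c2 */
		struct dvo_timing_image t = {
			.himage_lo = 0x58, .himage_hi = 0x1,
			.vimage_lo = 0xc2, .vimage_hi = 0x0,
		};
		uint16_t width  = ((uint16_t)t.himage_hi << 8) | t.himage_lo;
		uint16_t height = ((uint16_t)t.vimage_hi << 8) | t.vimage_lo;

		printf("%u x %u\n", width, height);	/* 344 x 194 */
		return 0;
	}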